hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3cb2063e2e13d9df8c308ae92078bda0ec502e | 9,826 | py | Python | allocation/tests/test_allocation_model.py | SuviVappula/tilavarauspalvelu-core | ad7dec36e392a7b2927e2f825c3b0eb29b700793 | [
"MIT"
] | null | null | null | allocation/tests/test_allocation_model.py | SuviVappula/tilavarauspalvelu-core | ad7dec36e392a7b2927e2f825c3b0eb29b700793 | [
"MIT"
] | 90 | 2020-11-13T07:42:32.000Z | 2022-03-29T08:54:20.000Z | allocation/tests/test_allocation_model.py | SuviVappula/tilavarauspalvelu-core | ad7dec36e392a7b2927e2f825c3b0eb29b700793 | [
"MIT"
] | 8 | 2021-02-10T11:31:22.000Z | 2022-01-28T14:33:47.000Z | import datetime
from unittest import mock
import pytest
from assertpy import assert_that
from django.conf import settings
from django.utils import timezone
from allocation.allocation_data_builder import AllocationDataBuilder
from allocation.allocation_models import ALLOCATION_PRECISION
from allocation.tests.conftest import get_default_end, get_default_start
from applications.models import ApplicationStatus
from opening_hours.hours import TimeElement
def every_second_day(p_start, p_end):
    """Return dates spaced two days apart, from *p_start* up to and including *p_end*."""
    if p_end < p_start:
        return []
    step = datetime.timedelta(days=2)
    # Number of two-day steps that still land on or before p_end.
    whole_days = (p_end - p_start).days
    return [p_start + step * i for i in range(whole_days // 2 + 1)]
def get_opening_hour_data(*args, **kwargs):
    """Mock side-effect for ``get_opening_hours``.

    Expects positional args ``(resource_id, start, end)`` and returns one
    opening-hours entry for every second day in the range, each open
    14:00-18:00 in the default timezone. Returns an empty list when fewer
    than three positional args are given (e.g. the mock is called bare).
    """
    if len(args) < 3:
        return []
    # Renamed from ``id`` to avoid shadowing the builtin; slice tolerates
    # extra positional args instead of raising ValueError on unpack.
    resource_id, start, end = args[:3]
    tz = timezone.get_default_timezone()
    return [
        {
            "resource_id": resource_id,
            "date": date,
            # Fresh TimeElement per entry, matching the original's behavior
            # of not sharing objects between response rows.
            "times": [
                TimeElement(
                    start_time=datetime.time(hour=14, tzinfo=tz),
                    end_time=datetime.time(hour=18, tzinfo=tz),
                    end_time_on_next_day=False,
                )
            ],
        }
        for date in every_second_day(start, end)
    ]
@mock.patch(
    "allocation.allocation_data_builder.get_opening_hours",
    side_effect=get_opening_hour_data,
)
@pytest.mark.django_db
def test_should_map_application_round_dates(
    mocked_opening_hours, application_round_with_reservation_units
):
    """Allocation data should carry the application round's period start/end dates."""
    # NOTE(review): this bare call only triggers the side_effect with no args
    # (which returns []); it looks like a leftover — confirm it can be removed.
    mocked_opening_hours()
    data = AllocationDataBuilder(
        application_round=application_round_with_reservation_units
    ).get_allocation_data()
    assert_that(data.period_start).is_equal_to(get_default_start())
    assert_that(data.period_end).is_equal_to(get_default_end())
@pytest.mark.django_db
def test_should_map_reservation_unit_open_times_with_mock_data(
    application_with_reservation_units, application_round_with_reservation_units
):
    """The first reservation unit's available times should cover 10:00-22:00 on
    each of the 31 days of the period (default mock opening data)."""
    data = AllocationDataBuilder(
        application_round=application_round_with_reservation_units
    ).get_allocation_data()
    # Collect [start, end] pairs (in ALLOCATION_PRECISION units) for the
    # round's first reservation unit.
    times = [
        [available.start, available.end]
        for available in data.spaces[
            application_round_with_reservation_units.reservation_units.all()[0].id
        ].available_times.values()
    ]
    # Open every day in application period from 10.00 to 22.00
    expected = [
        [
            round((i * 24 + 10) * 60 // ALLOCATION_PRECISION),
            round((i * 24 + 22) * 60 // ALLOCATION_PRECISION),
        ]
        for i in range(31)
    ]
    assert_that(times).is_equal_to(expected)
@mock.patch(
    "allocation.allocation_data_builder.get_opening_hours",
    side_effect=get_opening_hour_data,
)
@pytest.mark.django_db
def test_should_map_reservation_unit_open_times_from_hauki(
    application_with_reservation_units, application_round_with_reservation_units
):
    """With the mocked Hauki response, available times should be 14:00-18:00 on
    every second day of the 31-day period."""
    # NOTE(review): mutates global settings to enable the Hauki code path —
    # presumably the builder checks this URL; confirm, and consider
    # django's settings override helpers for isolation.
    settings.HAUKI_API_URL = "http://test.com"
    data = AllocationDataBuilder(
        application_round=application_round_with_reservation_units
    ).get_allocation_data()
    times = [
        [available.start, available.end]
        for available in data.spaces[
            application_round_with_reservation_units.reservation_units.all()[0].id
        ].available_times.values()
    ]
    # Open every second day from 14 to 18
    expected = [
        [
            round((i * 24 + 14) * 60 // ALLOCATION_PRECISION),
            round((i * 24 + 18) * 60 // ALLOCATION_PRECISION),
        ]
        for i in range(31)
        if i % 2 == 0
    ]
    assert_that(times).is_equal_to(expected)
@pytest.mark.django_db
def test_should_map_application_events(
    application_round_with_reservation_units,
    application_with_reservation_units,
    recurring_application_event,
    scheduled_for_monday,
):
    """Events should land in the default (None) basket with weekly Monday
    occurrences and min/max durations of one and two hours."""
    data = AllocationDataBuilder(
        application_round=application_round_with_reservation_units
    ).get_allocation_data()
    # Expected occurrences: every Monday 10:00 from 2020-01-06 through 2020-02-24.
    dates = []
    start = datetime.datetime(2020, 1, 6, 10, 0)
    delta = datetime.timedelta(days=7)
    while start <= datetime.datetime(2020, 2, 24, 10, 0):
        dates.append(start)
        start += delta
    assert_that(
        data.baskets[None].events[0].occurrences[scheduled_for_monday.id]
    ).has_occurrences(dates).has_weekday(0)
    assert_that(data.baskets[None].events[0]).has_id(recurring_application_event.id)
    # One hour expressed in ALLOCATION_PRECISION units.
    hour = 60 // ALLOCATION_PRECISION
    assert_that(data.baskets[None].events[0]).has_min_duration(hour).has_max_duration(
        hour * 2
    )
@pytest.mark.django_db
def test_should_exclude_already_accepted_schedules(
    application_round_with_reservation_units,
    application_with_reservation_units,
    recurring_application_event,
    scheduled_for_monday,
    result_scheduled_for_monday,
):
    """A schedule that already has a result should produce no occurrences."""
    data = AllocationDataBuilder(
        application_round=application_round_with_reservation_units
    ).get_allocation_data()
    assert_that(data.baskets[None].events[0].occurrences).is_empty()
@pytest.mark.django_db
def test_should_map_units_to_spaces(
    application_round_with_reservation_units,
    application_with_reservation_units,
    recurring_application_event,
    matching_event_reservation_unit,
    scheduled_for_monday,
):
    """The event's space ids should contain exactly the matching reservation unit."""
    data = AllocationDataBuilder(
        application_round=application_round_with_reservation_units
    ).get_allocation_data()
    assert_that(data.baskets[None].events[0].space_ids).is_equal_to(
        [matching_event_reservation_unit.reservation_unit.id]
    )
@pytest.mark.django_db
def test_should_exclude_declined_units(
    application_round_with_reservation_units,
    application_with_reservation_units,
    recurring_application_event,
    matching_event_reservation_unit,
    scheduled_for_monday,
):
    """Reservation units declined for the event must not appear in its space ids."""
    recurring_application_event.declined_reservation_units.set(
        [matching_event_reservation_unit.reservation_unit]
    )
    data = AllocationDataBuilder(
        application_round=application_round_with_reservation_units
    ).get_allocation_data()
    assert_that(data.baskets[None].events[0].space_ids).is_equal_to([])
@pytest.mark.django_db
def test_should_handle_none_max_duration(
    application_round_with_reservation_units,
    application_with_reservation_units,
    recurring_application_event,
    scheduled_for_monday,
):
    """When max_duration is None, it should fall back to equal the min duration."""
    recurring_application_event.max_duration = None
    recurring_application_event.save()
    data = AllocationDataBuilder(
        application_round=application_round_with_reservation_units
    ).get_allocation_data()
    # One hour expressed in ALLOCATION_PRECISION units.
    hour = 60 // ALLOCATION_PRECISION
    assert_that(data.baskets[None].events[0]).has_min_duration(hour).has_max_duration(
        hour
    )
@pytest.mark.django_db
def test_should_map_period_start_and_end_from_application_round(
    application_round_with_reservation_units,
    application_with_reservation_units,
    recurring_application_event,
    scheduled_for_monday,
):
    """Event period start/end should come from the round's reservation period."""
    data = AllocationDataBuilder(
        application_round=application_round_with_reservation_units
    ).get_allocation_data()
    assert_that(data.baskets[None].events[0]).has_period_start(
        application_with_reservation_units.application_round.reservation_period_begin
    ).has_period_end(
        application_with_reservation_units.application_round.reservation_period_end
    )
@pytest.mark.django_db
def test_mapping_application_round_baskets(
    application_round_with_reservation_units,
    default_application_round,
    application_round_basket_one,
    application_round_basket_two,
    recurring_application_event,
):
    """Both baskets of the round should be mapped with their id, order number
    and allocation percentage intact."""
    data = AllocationDataBuilder(
        application_round=application_round_with_reservation_units
    ).get_allocation_data()
    assert_that(data.baskets).contains_key(
        application_round_basket_one.id, application_round_basket_two.id
    )
    assert_that(data.baskets[application_round_basket_one.id]).has_id(
        application_round_basket_one.id
    ).has_order_number(
        application_round_basket_one.order_number
    ).has_allocation_percentage(
        application_round_basket_one.allocation_percentage
    )
    assert_that(data.baskets[application_round_basket_two.id]).has_id(
        application_round_basket_two.id
    ).has_order_number(
        application_round_basket_two.order_number
    ).has_allocation_percentage(
        application_round_basket_two.allocation_percentage
    )
@pytest.mark.parametrize(
    "application_status",
    [ApplicationStatus.CANCELLED, ApplicationStatus.DECLINED],
)
@pytest.mark.django_db
def test_should_exclude_cancelled_and_declined_applications(
    application_status,
    application_round_with_reservation_units,
    recurring_application_event,
):
    """Events of cancelled or declined applications are left out of allocation data."""
    recurring_application_event.application.status = application_status
    recurring_application_event.application.save()
    data = AllocationDataBuilder(
        application_round=application_round_with_reservation_units
    ).get_allocation_data()
    assert_that(data.baskets[None].events).is_empty()
@pytest.mark.parametrize(
    "application_status",
    [ApplicationStatus.IN_REVIEW, ApplicationStatus.REVIEW_DONE],
)
@pytest.mark.django_db
def test_should_include_not_cancelled_or_declined_applications(
    application_status,
    application_round_with_reservation_units,
    recurring_application_event,
):
    """Applications in review states should keep their events in allocation data."""
    recurring_application_event.application.status = application_status
    recurring_application_event.application.save()
    data = AllocationDataBuilder(
        application_round=application_round_with_reservation_units
    ).get_allocation_data()
    assert_that(data.baskets[None].events).is_length(1)
| 30.233846 | 86 | 0.736821 | import datetime
from unittest import mock
import pytest
from assertpy import assert_that
from django.conf import settings
from django.utils import timezone
from allocation.allocation_data_builder import AllocationDataBuilder
from allocation.allocation_models import ALLOCATION_PRECISION
from allocation.tests.conftest import get_default_end, get_default_start
from applications.models import ApplicationStatus
from opening_hours.hours import TimeElement
def every_second_day(p_start, p_end):
dates = []
start = p_start
delta = datetime.timedelta(days=2)
while start <= p_end:
dates.append(start)
start += delta
return dates
def get_opening_hour_data(*args, **kwargs):
if len(args) < 3:
return []
(id, start, end) = args
dates = every_second_day(start, end)
response = []
for date in dates:
response.append(
{
"resource_id": id,
"date": date,
"times": [
TimeElement(
start_time=datetime.time(
hour=14, tzinfo=timezone.get_default_timezone()
),
end_time=datetime.time(
hour=18, tzinfo=timezone.get_default_timezone()
),
end_time_on_next_day=False,
)
],
}
)
return response
@mock.patch(
"allocation.allocation_data_builder.get_opening_hours",
side_effect=get_opening_hour_data,
)
@pytest.mark.django_db
def test_should_map_application_round_dates(
mocked_opening_hours, application_round_with_reservation_units
):
mocked_opening_hours()
data = AllocationDataBuilder(
application_round=application_round_with_reservation_units
).get_allocation_data()
assert_that(data.period_start).is_equal_to(get_default_start())
assert_that(data.period_end).is_equal_to(get_default_end())
@pytest.mark.django_db
def test_should_map_reservation_unit_open_times_with_mock_data(
application_with_reservation_units, application_round_with_reservation_units
):
data = AllocationDataBuilder(
application_round=application_round_with_reservation_units
).get_allocation_data()
times = [
[available.start, available.end]
for available in data.spaces[
application_round_with_reservation_units.reservation_units.all()[0].id
].available_times.values()
]
expected = [
[
round((i * 24 + 10) * 60 // ALLOCATION_PRECISION),
round((i * 24 + 22) * 60 // ALLOCATION_PRECISION),
]
for i in range(31)
]
assert_that(times).is_equal_to(expected)
@mock.patch(
"allocation.allocation_data_builder.get_opening_hours",
side_effect=get_opening_hour_data,
)
@pytest.mark.django_db
def test_should_map_reservation_unit_open_times_from_hauki(
application_with_reservation_units, application_round_with_reservation_units
):
settings.HAUKI_API_URL = "http://test.com"
data = AllocationDataBuilder(
application_round=application_round_with_reservation_units
).get_allocation_data()
times = [
[available.start, available.end]
for available in data.spaces[
application_round_with_reservation_units.reservation_units.all()[0].id
].available_times.values()
]
expected = [
[
round((i * 24 + 14) * 60 // ALLOCATION_PRECISION),
round((i * 24 + 18) * 60 // ALLOCATION_PRECISION),
]
for i in range(31)
if i % 2 == 0
]
assert_that(times).is_equal_to(expected)
@pytest.mark.django_db
def test_should_map_application_events(
application_round_with_reservation_units,
application_with_reservation_units,
recurring_application_event,
scheduled_for_monday,
):
data = AllocationDataBuilder(
application_round=application_round_with_reservation_units
).get_allocation_data()
dates = []
start = datetime.datetime(2020, 1, 6, 10, 0)
delta = datetime.timedelta(days=7)
while start <= datetime.datetime(2020, 2, 24, 10, 0):
dates.append(start)
start += delta
assert_that(
data.baskets[None].events[0].occurrences[scheduled_for_monday.id]
).has_occurrences(dates).has_weekday(0)
assert_that(data.baskets[None].events[0]).has_id(recurring_application_event.id)
hour = 60 // ALLOCATION_PRECISION
assert_that(data.baskets[None].events[0]).has_min_duration(hour).has_max_duration(
hour * 2
)
@pytest.mark.django_db
def test_should_exclude_already_accepted_schedules(
application_round_with_reservation_units,
application_with_reservation_units,
recurring_application_event,
scheduled_for_monday,
result_scheduled_for_monday,
):
data = AllocationDataBuilder(
application_round=application_round_with_reservation_units
).get_allocation_data()
assert_that(data.baskets[None].events[0].occurrences).is_empty()
@pytest.mark.django_db
def test_should_map_units_to_spaces(
application_round_with_reservation_units,
application_with_reservation_units,
recurring_application_event,
matching_event_reservation_unit,
scheduled_for_monday,
):
data = AllocationDataBuilder(
application_round=application_round_with_reservation_units
).get_allocation_data()
assert_that(data.baskets[None].events[0].space_ids).is_equal_to(
[matching_event_reservation_unit.reservation_unit.id]
)
@pytest.mark.django_db
def test_should_exclude_declined_units(
application_round_with_reservation_units,
application_with_reservation_units,
recurring_application_event,
matching_event_reservation_unit,
scheduled_for_monday,
):
recurring_application_event.declined_reservation_units.set(
[matching_event_reservation_unit.reservation_unit]
)
data = AllocationDataBuilder(
application_round=application_round_with_reservation_units
).get_allocation_data()
assert_that(data.baskets[None].events[0].space_ids).is_equal_to([])
@pytest.mark.django_db
def test_should_handle_none_max_duration(
application_round_with_reservation_units,
application_with_reservation_units,
recurring_application_event,
scheduled_for_monday,
):
recurring_application_event.max_duration = None
recurring_application_event.save()
data = AllocationDataBuilder(
application_round=application_round_with_reservation_units
).get_allocation_data()
hour = 60 // ALLOCATION_PRECISION
assert_that(data.baskets[None].events[0]).has_min_duration(hour).has_max_duration(
hour
)
@pytest.mark.django_db
def test_should_map_period_start_and_end_from_application_round(
application_round_with_reservation_units,
application_with_reservation_units,
recurring_application_event,
scheduled_for_monday,
):
data = AllocationDataBuilder(
application_round=application_round_with_reservation_units
).get_allocation_data()
assert_that(data.baskets[None].events[0]).has_period_start(
application_with_reservation_units.application_round.reservation_period_begin
).has_period_end(
application_with_reservation_units.application_round.reservation_period_end
)
@pytest.mark.django_db
def test_mapping_application_round_baskets(
application_round_with_reservation_units,
default_application_round,
application_round_basket_one,
application_round_basket_two,
recurring_application_event,
):
data = AllocationDataBuilder(
application_round=application_round_with_reservation_units
).get_allocation_data()
assert_that(data.baskets).contains_key(
application_round_basket_one.id, application_round_basket_two.id
)
assert_that(data.baskets[application_round_basket_one.id]).has_id(
application_round_basket_one.id
).has_order_number(
application_round_basket_one.order_number
).has_allocation_percentage(
application_round_basket_one.allocation_percentage
)
assert_that(data.baskets[application_round_basket_two.id]).has_id(
application_round_basket_two.id
).has_order_number(
application_round_basket_two.order_number
).has_allocation_percentage(
application_round_basket_two.allocation_percentage
)
@pytest.mark.parametrize(
"application_status",
[ApplicationStatus.CANCELLED, ApplicationStatus.DECLINED],
)
@pytest.mark.django_db
def test_should_exclude_cancelled_and_declined_applications(
application_status,
application_round_with_reservation_units,
recurring_application_event,
):
recurring_application_event.application.status = application_status
recurring_application_event.application.save()
data = AllocationDataBuilder(
application_round=application_round_with_reservation_units
).get_allocation_data()
assert_that(data.baskets[None].events).is_empty()
@pytest.mark.parametrize(
"application_status",
[ApplicationStatus.IN_REVIEW, ApplicationStatus.REVIEW_DONE],
)
@pytest.mark.django_db
def test_should_include_not_cancelled_or_declined_applications(
application_status,
application_round_with_reservation_units,
recurring_application_event,
):
recurring_application_event.application.status = application_status
recurring_application_event.application.save()
data = AllocationDataBuilder(
application_round=application_round_with_reservation_units
).get_allocation_data()
assert_that(data.baskets[None].events).is_length(1)
| true | true |
1c3cb2ef93da1d4961633ca0dadd632f8ceb0772 | 3,491 | py | Python | pyfos/utils/fru/powersupply_show.py | madhavinaiduprathap/pyfosbrocade | ec100e77c441761c3e688f1d8e5d18ad38cc83f4 | [
"Apache-2.0"
] | 44 | 2017-11-17T12:03:11.000Z | 2022-02-03T20:57:56.000Z | pyfos/utils/fru/powersupply_show.py | madhavinaiduprathap/pyfosbrocade | ec100e77c441761c3e688f1d8e5d18ad38cc83f4 | [
"Apache-2.0"
] | 13 | 2018-10-09T15:34:15.000Z | 2022-02-24T20:03:17.000Z | pyfos/utils/fru/powersupply_show.py | madhavinaiduprathap/pyfosbrocade | ec100e77c441761c3e688f1d8e5d18ad38cc83f4 | [
"Apache-2.0"
] | 23 | 2017-12-14T18:08:33.000Z | 2022-02-03T15:33:40.000Z | #!/usr/bin/env python3
# Copyright 2018 Brocade Communications Systems LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may also obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`powersupply_show` - PyFOS util to show the power supply unit information.
**********************************************************************************
The :mod:`powersupply_show` util shows the power supply unit information.
This module can be used to display the power supply unit information.
* Input:
| Infrastructure Options:
| -i,--ipaddr=IPADDR The IP address of the FOS switch.
| -L,--login=LOGIN The login name.
| -P,--password=PASSWORD The password.
| -f,--vfid=VFID The VFID to which the request is \
directed [OPTIONAL].
| -s,--secured=MODE The HTTPS mode "self" or "CA" [OPTIONAL].
| -v,--verbose Verbose mode [OPTIONAL].
| Util Script Options:
| --unit-number=unit-number Sets the unit number.
* Output:
* Power supply unit information. When the unit number is not provided,
all power supply units will be displayed.
.. function:: ps_unit_show.show_ps_unit(session, unit)
* Displays the power supply unit details.
Example Usage of the Method::
# Example 1: Display all the ps_units
ret = ps_unit_show.show_ps_unit(session, None)
print (ret)
# Example 2: Display a specific ps-unit 1
ret = ps_unit_show.show_ps_unit(session, 1)
print (ret)
Details::
ps_obj = power_supply()
if unit-number is None: # All powersupply units
result = ps_obj.get(session, None)
else:
result = ps_obj.get(session, unit)
* Input:
:param session: The session returned by the login.
:param unit: The specific unit number or none for all \
power supply units.
* Output:
:rtype: A dictionary of return status matching the REST response.
*Use Cases*
1. Retrieve the power supply unit information.
"""
import sys
from pyfos import pyfos_auth
from pyfos import pyfos_util
from pyfos.pyfos_brocade_fru import power_supply
from pyfos.utils import brcd_util
def show_ps_unit(session, unit):
    """Fetch power-supply details for *unit*, or for all units when *unit* is None.

    :param session: The session returned by the login.
    :param unit: A specific unit number, or None for every power supply unit.
    :rtype: A dictionary of return status matching the REST response.
    """
    # Both branches of the original collapse to one call: passing None
    # through requests every unit, same as passing it explicitly.
    return power_supply().get(session, unit)
def main(argv):
    """Parse CLI args, display the requested power-supply unit(s), then log out."""
    # Only the unit number may be supplied as a util-script filter option.
    filters = ['unit_number']
    inputs = brcd_util.parse(argv, power_supply, filters)
    ps_obj = inputs['utilobject']
    session = brcd_util.getsession(inputs)
    # peek_unit_number() is None when --unit-number was not given, which
    # makes show_ps_unit return every unit.
    result = show_ps_unit(inputs['session'],
                          ps_obj.peek_unit_number())
    pyfos_util.response_print(result)
    pyfos_auth.logout(session)
if __name__ == "__main__":
main(sys.argv[1:])
| 30.094828 | 82 | 0.640504 |
import sys
from pyfos import pyfos_auth
from pyfos import pyfos_util
from pyfos.pyfos_brocade_fru import power_supply
from pyfos.utils import brcd_util
def show_ps_unit(session, unit):
ps_obj = power_supply()
if unit is None:
result = ps_obj.get(session, None)
else:
result = ps_obj.get(session, unit)
return result
def main(argv):
filters = ['unit_number']
inputs = brcd_util.parse(argv, power_supply, filters)
ps_obj = inputs['utilobject']
session = brcd_util.getsession(inputs)
result = show_ps_unit(inputs['session'],
ps_obj.peek_unit_number())
pyfos_util.response_print(result)
pyfos_auth.logout(session)
if __name__ == "__main__":
main(sys.argv[1:])
| true | true |
1c3cb466a02a1e0c4d40fecb679fd01dc878271d | 702 | py | Python | hook.py | leonardogavaudan/emu | df263a9143c801028a7593895be8e647b8227617 | [
"Apache-2.0"
] | null | null | null | hook.py | leonardogavaudan/emu | df263a9143c801028a7593895be8e647b8227617 | [
"Apache-2.0"
] | null | null | null | hook.py | leonardogavaudan/emu | df263a9143c801028a7593895be8e647b8227617 | [
"Apache-2.0"
] | null | null | null | import os
import shutil
from app.utility.base_world import BaseWorld
from plugins.emu.app.emu_svc import EmuService
name = 'Emu'
description = 'The collection of abilities from the CTID Adversary Emulation Plans'
address = None
access = BaseWorld.Access.RED
data_dir = os.path.join('plugins', name.lower(), 'data')
async def enable(services):
    """Clone the emulation-plan repo if it is missing, then (re)populate the
    plugin's data directory from it."""
    plugin_svc = EmuService()
    if not os.path.isdir(plugin_svc.repo_dir):
        await plugin_svc.clone_repo()
    # Remove previously generated data so the populate step starts clean.
    for directory in ["abilities", "adversaries", "sources"]:
        full_path = os.path.join(data_dir, directory)
        if os.path.isdir(full_path):
            shutil.rmtree(full_path)
    await plugin_svc.populate_data_directory()
| 27 | 83 | 0.722222 | import os
import shutil
from app.utility.base_world import BaseWorld
from plugins.emu.app.emu_svc import EmuService
name = 'Emu'
description = 'The collection of abilities from the CTID Adversary Emulation Plans'
address = None
access = BaseWorld.Access.RED
data_dir = os.path.join('plugins', name.lower(), 'data')
async def enable(services):
plugin_svc = EmuService()
if not os.path.isdir(plugin_svc.repo_dir):
await plugin_svc.clone_repo()
for directory in ["abilities", "adversaries", "sources"]:
full_path = os.path.join(data_dir, directory)
if os.path.isdir(full_path):
shutil.rmtree(full_path)
await plugin_svc.populate_data_directory()
| true | true |
1c3cb5a8ffd9fbacef60a0be301fe4f5ae217ce2 | 3,610 | py | Python | tests/test_0013-rntuple-anchor.py | eic/uproot4 | deb8d88c2643521f372bf5005c51af8926016c7e | [
"BSD-3-Clause"
] | 133 | 2020-05-08T21:34:11.000Z | 2022-03-07T18:12:58.000Z | tests/test_0013-rntuple-anchor.py | eic/uproot4 | deb8d88c2643521f372bf5005c51af8926016c7e | [
"BSD-3-Clause"
] | 269 | 2020-05-13T02:42:24.000Z | 2022-03-24T20:24:16.000Z | tests/test_0013-rntuple-anchor.py | eic/uproot4 | deb8d88c2643521f372bf5005c51af8926016c7e | [
"BSD-3-Clause"
] | 45 | 2020-05-15T17:48:04.000Z | 2022-03-18T19:23:07.000Z | # BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
from __future__ import absolute_import
import json
import sys
try:
import queue
except ImportError:
import Queue as queue
import numpy
import pytest
import skhep_testdata
import uproot
def test():
    """Check the RNTuple anchor members in ntpl001_staff.root and dump the raw
    header/footer byte regions for inspection."""
    filename = skhep_testdata.data_path("uproot-ntpl001_staff.root")
    with uproot.open(filename) as f:
        obj = f["Staff"]
        # Anchor bookkeeping: version/size plus the seek points and byte
        # counts of the header and footer records in the file.
        assert obj.member("fVersion") == 0
        assert obj.member("fSize") == 48
        assert obj.member("fSeekHeader") == 854
        assert obj.member("fNBytesHeader") == 537
        assert obj.member("fLenHeader") == 2495
        assert obj.member("fSeekFooter") == 72369
        assert obj.member("fNBytesFooter") == 285
        assert obj.member("fLenFooter") == 804
        assert obj.member("fReserved") == 0

        # Single-range read of the header bytes, dumped for manual inspection.
        header_start = obj.member("fSeekHeader")
        header_stop = header_start + obj.member("fNBytesHeader")
        header_chunk = f.file.source.chunk(header_start, header_stop)

        print("HEADER")
        cursor = uproot.Cursor(header_start)
        cursor.debug(header_chunk, 80)
        print("\n")

        # Multi-range read of header and footer together; notifications queue
        # receives chunk-completion events from the source.
        notifications = queue.Queue()
        footer_start = obj.member("fSeekFooter")
        footer_stop = footer_start + obj.member("fNBytesFooter")
        header_chunk, footer_chunk = f.file.source.chunks(
            [(header_start, header_stop), (footer_start, footer_stop)],
            notifications,
        )
        print("FOOTER")
        cursor = uproot.Cursor(footer_start)
        cursor.debug(footer_chunk, 80)
        print("\n")
# HEADER
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 76 52 1 16 2 0 191 9 0 198 14 105 8 80 63 75 128 117 0 0
# L 4 --- --- --- --- --- --- --- --- --- i --- P ? K --- u --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 0 0 187 9 0 1 0 144 5 0 0 0 83 116 97 102 102 13 0 255
# --- --- --- --- --- --- --- --- --- --- --- --- S t a f f --- --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 6 16 0 0 0 117 110 100 101 102 105 110 101 100 32 97 117 116 104 111
# --- --- --- --- --- u n d e f i n e d a u t h o
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 114 0 1 0 4 47 24 0 1 0 3 31 12 12 0 0 4 8 0 110
# r --- --- --- --- / --- --- --- --- --- --- --- --- --- --- --- --- --- n
# FOOTER
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 76 52 1 20 1 0 36 3 0 86 138 213 67 60 183 39 139 27 0 1
# L 4 --- --- --- --- $ --- --- V --- --- C < --- ' --- --- --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 0 23 1 12 0 23 12 12 0 42 72 0 1 0 47 24 0 1 0 7
# --- --- --- --- --- --- --- --- --- * H --- --- --- / --- --- --- --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 34 26 13 8 0 34 145 5 8 0 34 213 9 86 0 27 13 84 0 0
# " --- --- --- --- " --- --- --- --- " --- --- V --- --- --- T --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 1 0 102 52 26 0 0 148 1 124 0 0 16 0 34 102 15 17 0 34
# --- --- f 4 --- --- --- --- --- | --- --- --- --- " f --- --- --- "
| 42.470588 | 83 | 0.380332 |
from __future__ import absolute_import
import json
import sys
try:
import queue
except ImportError:
import Queue as queue
import numpy
import pytest
import skhep_testdata
import uproot
def test():
filename = skhep_testdata.data_path("uproot-ntpl001_staff.root")
with uproot.open(filename) as f:
obj = f["Staff"]
assert obj.member("fVersion") == 0
assert obj.member("fSize") == 48
assert obj.member("fSeekHeader") == 854
assert obj.member("fNBytesHeader") == 537
assert obj.member("fLenHeader") == 2495
assert obj.member("fSeekFooter") == 72369
assert obj.member("fNBytesFooter") == 285
assert obj.member("fLenFooter") == 804
assert obj.member("fReserved") == 0
header_start = obj.member("fSeekHeader")
header_stop = header_start + obj.member("fNBytesHeader")
header_chunk = f.file.source.chunk(header_start, header_stop)
print("HEADER")
cursor = uproot.Cursor(header_start)
cursor.debug(header_chunk, 80)
print("\n")
notifications = queue.Queue()
footer_start = obj.member("fSeekFooter")
footer_stop = footer_start + obj.member("fNBytesFooter")
header_chunk, footer_chunk = f.file.source.chunks(
[(header_start, header_stop), (footer_start, footer_stop)],
notifications,
)
print("FOOTER")
cursor = uproot.Cursor(footer_start)
cursor.debug(footer_chunk, 80)
print("\n")
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 0 23 1 12 0 23 12 12 0 42 72 0 1 0 47 24 0 1 0 7
# --- --- --- --- --- --- --- --- --- * H --- --- --- / --- --- --- --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 34 26 13 8 0 34 145 5 8 0 34 213 9 86 0 27 13 84 0 0
# " --- --- --- --- " --- --- --- --- " --- --- V --- --- --- T --- ---
# --+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+-
# 1 0 102 52 26 0 0 148 1 124 0 0 16 0 34 102 15 17 0 34
# --- --- f 4 --- --- --- --- --- | --- --- --- --- " f --- --- --- "
| true | true |
1c3cb5e2b33c7d4bedf3b9036ac977eecc09181b | 794 | py | Python | examples/structured_configs_tutorial/2_node_path/my_app.py | dylanturpin/hydra | 6478511aee837491aab34a1e62c43c1c6ef730b9 | [
"MIT"
] | 2 | 2019-06-12T17:22:38.000Z | 2020-06-10T07:58:37.000Z | examples/structured_configs_tutorial/2_node_path/my_app.py | dylanturpin/hydra | 6478511aee837491aab34a1e62c43c1c6ef730b9 | [
"MIT"
] | null | null | null | examples/structured_configs_tutorial/2_node_path/my_app.py | dylanturpin/hydra | 6478511aee837491aab34a1e62c43c1c6ef730b9 | [
"MIT"
] | 2 | 2019-01-16T05:31:35.000Z | 2020-04-10T22:00:01.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from dataclasses import dataclass
from omegaconf import DictConfig
import hydra
from hydra.core.config_store import ConfigStore
@dataclass
class MySQLConfig:
    """Structured-config schema for a MySQL connection (tutorial example).

    Field defaults double as the default config values.
    """

    driver: str = "mysql"
    host: str = "localhost"
    port: int = 3306
    user: str = "omry"
    password: str = "secret"
ConfigStore.instance().store(node=MySQLConfig, name="config", path="db")
@hydra.main(config_name="config")
def my_app(cfg: DictConfig) -> None:
# In order to get type safety you need to tell Python that the type of cfg.db is MySQLConfig:
db: MySQLConfig = cfg.db
print(
f"Connecting to {db.driver} at {db.host}:{db.port}, user={db.user}, password={db.password}"
)
if __name__ == "__main__":
my_app()
| 24.060606 | 99 | 0.691436 |
from dataclasses import dataclass
from omegaconf import DictConfig
import hydra
from hydra.core.config_store import ConfigStore
@dataclass
class MySQLConfig:
driver: str = "mysql"
host: str = "localhost"
port: int = 3306
user: str = "omry"
password: str = "secret"
ConfigStore.instance().store(node=MySQLConfig, name="config", path="db")
@hydra.main(config_name="config")
def my_app(cfg: DictConfig) -> None:
db: MySQLConfig = cfg.db
print(
f"Connecting to {db.driver} at {db.host}:{db.port}, user={db.user}, password={db.password}"
)
if __name__ == "__main__":
my_app()
| true | true |
1c3cb69cc713ba2f6020e79a4c0b2869a86d2cf2 | 3,149 | py | Python | test/functional/mempool_limit.py | sirgreyhat/verge | 3181a2658e01d2d8dceed6f57dca356d94c92be1 | [
"MIT"
] | 1,787 | 2016-02-20T23:38:23.000Z | 2020-02-11T14:10:01.000Z | test/functional/mempool_limit.py | sirgreyhat/verge | 3181a2658e01d2d8dceed6f57dca356d94c92be1 | [
"MIT"
] | 824 | 2016-03-09T22:08:06.000Z | 2020-01-24T12:01:15.000Z | test/functional/mempool_limit.py | sirgreyhat/verge | 3181a2658e01d2d8dceed6f57dca356d94c92be1 | [
"MIT"
] | 577 | 2016-02-10T20:26:47.000Z | 2020-01-13T09:22:44.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool limiting together/eviction with the wallet."""
from test_framework.test_framework import VergeTestFramework
from test_framework.util import *
class MempoolLimitTest(VergeTestFramework):
    """Functional test: a size-capped mempool evicts the lowest-fee tx and raises mempoolminfee."""
    def set_test_params(self):
        # Single node on a fresh chain; cap the mempool at 5 MB and forbid
        # spending unconfirmed change so the wallet cannot reuse evicted funds.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [["-maxmempool=5", "-spendzeroconfchange=0"]]
    def run_test(self):
        txouts = gen_return_txouts()
        relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        # Before any pressure, the effective mempool minimum fee equals the relay fee.
        self.log.info('Check that mempoolminfee is minrelytxfee')
        assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
        assert_equal(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
        txids = []
        utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)
        # Submit one minimum-fee transaction; it should be the first evicted.
        self.log.info('Create a mempool tx that will be evicted')
        us0 = utxos.pop()
        inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
        outputs = {self.nodes[0].getnewaddress() : 0.0001}
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        self.nodes[0].settxfee(relayfee) # specifically fund this tx with low fee
        txF = self.nodes[0].fundrawtransaction(tx)
        self.nodes[0].settxfee(0) # return to automatic fee selection
        txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
        txid = self.nodes[0].sendrawtransaction(txFS['hex'])
        relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        base_fee = relayfee*100
        # Flood the mempool with three waves of large, increasingly well-paying
        # transactions to push it past the 5 MB cap.
        for i in range (3):
            txids.append([])
            txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)
        self.log.info('The tx should be evicted by now')
        assert(txid not in self.nodes[0].getrawmempool())
        # The wallet still knows the tx even though the mempool dropped it.
        txdata = self.nodes[0].gettransaction(txid)
        assert(txdata['confirmations'] == 0) #confirmation should still be 0
        # Eviction raises the dynamic mempool minimum fee above the static relay fee.
        self.log.info('Check that mempoolminfee is larger than minrelytxfee')
        assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
        assert_greater_than(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
        # A tx paying only the relay fee must now be rejected outright.
        self.log.info('Create a mempool tx that will not pass mempoolminfee')
        us0 = utxos.pop()
        inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
        outputs = {self.nodes[0].getnewaddress() : 0.0001}
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        # specifically fund this tx with a fee < mempoolminfee, >= than minrelaytxfee
        txF = self.nodes[0].fundrawtransaction(tx, {'feeRate': relayfee})
        txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
        assert_raises_rpc_error(-26, "mempool min fee not met", self.nodes[0].sendrawtransaction, txFS['hex'])
if __name__ == '__main__':
    MempoolLimitTest().main()
| 48.446154 | 118 | 0.667196 |
from test_framework.test_framework import VergeTestFramework
from test_framework.util import *
class MempoolLimitTest(VergeTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-maxmempool=5", "-spendzeroconfchange=0"]]
def run_test(self):
txouts = gen_return_txouts()
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
self.log.info('Check that mempoolminfee is minrelytxfee')
assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_equal(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
txids = []
utxos = create_confirmed_utxos(relayfee, self.nodes[0], 91)
self.log.info('Create a mempool tx that will be evicted')
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.0001}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
self.nodes[0].settxfee(relayfee)
txF = self.nodes[0].fundrawtransaction(tx)
self.nodes[0].settxfee(0)
txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
txid = self.nodes[0].sendrawtransaction(txFS['hex'])
relayfee = self.nodes[0].getnetworkinfo()['relayfee']
base_fee = relayfee*100
for i in range (3):
txids.append([])
txids[i] = create_lots_of_big_transactions(self.nodes[0], txouts, utxos[30*i:30*i+30], 30, (i+1)*base_fee)
self.log.info('The tx should be evicted by now')
assert(txid not in self.nodes[0].getrawmempool())
txdata = self.nodes[0].gettransaction(txid)
assert(txdata['confirmations'] == 0)
self.log.info('Check that mempoolminfee is larger than minrelytxfee')
assert_equal(self.nodes[0].getmempoolinfo()['minrelaytxfee'], Decimal('0.00001000'))
assert_greater_than(self.nodes[0].getmempoolinfo()['mempoolminfee'], Decimal('0.00001000'))
self.log.info('Create a mempool tx that will not pass mempoolminfee')
us0 = utxos.pop()
inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
outputs = {self.nodes[0].getnewaddress() : 0.0001}
tx = self.nodes[0].createrawtransaction(inputs, outputs)
txF = self.nodes[0].fundrawtransaction(tx, {'feeRate': relayfee})
txFS = self.nodes[0].signrawtransactionwithwallet(txF['hex'])
assert_raises_rpc_error(-26, "mempool min fee not met", self.nodes[0].sendrawtransaction, txFS['hex'])
if __name__ == '__main__':
MempoolLimitTest().main()
| true | true |
1c3cb76b137c8a4ff0c211834b0357f6db6281d5 | 5,081 | py | Python | demisto_client/demisto_api/models/version.py | ekmixon/demisto-py | 187163a148cb782b289c71d97ec4efffa898ec94 | [
"Apache-2.0"
] | 59 | 2017-05-04T05:48:00.000Z | 2022-02-27T21:06:01.000Z | demisto_client/demisto_api/models/version.py | ekmixon/demisto-py | 187163a148cb782b289c71d97ec4efffa898ec94 | [
"Apache-2.0"
] | 44 | 2017-05-09T17:42:43.000Z | 2022-03-30T05:55:44.000Z | demisto_client/demisto_api/models/version.py | ekmixon/demisto-py | 187163a148cb782b289c71d97ec4efffa898ec94 | [
"Apache-2.0"
] | 37 | 2017-05-06T04:30:32.000Z | 2022-02-15T04:59:00.000Z | # coding: utf-8
"""
Demisto API
This is the public REST API to integrate with the demisto server. HTTP request can be sent using any HTTP-client. For an example dedicated client take a look at: https://github.com/demisto/demisto-py. Requests must include API-key that can be generated in the Demisto web client under 'Settings' -> 'Integrations' -> 'API keys' Optimistic Locking and Versioning\\: When using Demisto REST API, you will need to make sure to work on the latest version of the item (incident, entry, etc.), otherwise, you will get a DB version error (which not allow you to override a newer item). In addition, you can pass 'version\\: -1' to force data override (make sure that other users data might be lost). Assume that Alice and Bob both read the same data from Demisto server, then they both changed the data, and then both tried to write the new versions back to the server. Whose changes should be saved? Alice’s? Bob’s? To solve this, each data item in Demisto has a numeric incremental version. If Alice saved an item with version 4 and Bob trying to save the same item with version 3, Demisto will rollback Bob request and returns a DB version conflict error. Bob will need to get the latest item and work on it so Alice work will not get lost. Example request using 'curl'\\: ``` curl 'https://hostname:443/incidents/search' -H 'content-type: application/json' -H 'accept: application/json' -H 'Authorization: <API Key goes here>' --data-binary '{\"filter\":{\"query\":\"-status:closed -category:job\",\"period\":{\"by\":\"day\",\"fromValue\":7}}}' --compressed ``` # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Version(object):
    """Swagger-generated model for a server version.

    Hand-edited to drop the ``six`` py2-compat shim: plain Python 3 dict
    iteration and comprehensions replace ``six.iteritems`` / ``map(lambda …)``
    with identical behaviour.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """
    swagger_types = {
        'digits': 'list[int]',
        'label': 'str'
    }

    attribute_map = {
        'digits': 'Digits',
        'label': 'Label'
    }

    def __init__(self, digits=None, label=None):
        """Version - a model defined in Swagger.

        :param digits: numeric version components, e.g. ``[2, 0, 0]``
        :param label: human-readable version label
        """
        self._digits = None
        self._label = None
        self.discriminator = None
        if digits is not None:
            self.digits = digits
        if label is not None:
            self.label = label

    @property
    def digits(self):
        """Gets the digits of this Version.

        :rtype: list[int]
        """
        return self._digits

    @digits.setter
    def digits(self, digits):
        """Sets the digits of this Version.

        :type digits: list[int]
        """
        self._digits = digits

    @property
    def label(self):
        """Gets the label of this Version.

        :rtype: str
        """
        return self._label

    @label.setter
    def label(self, label):
        """Sets the label of this Version.

        :type label: str
        """
        self._label = label

    def to_dict(self):
        """Return the model properties as a dict, serialising nested models recursively."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        if issubclass(Version, dict):
            # Kept from the generated template; a no-op unless the model subclasses dict.
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, Version):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
import pprint
import re
import six
class Version(object):
swagger_types = {
'digits': 'list[int]',
'label': 'str'
}
attribute_map = {
'digits': 'Digits',
'label': 'Label'
}
def __init__(self, digits=None, label=None):
self._digits = None
self._label = None
self.discriminator = None
if digits is not None:
self.digits = digits
if label is not None:
self.label = label
@property
def digits(self):
return self._digits
@digits.setter
def digits(self, digits):
self._digits = digits
@property
def label(self):
return self._label
@label.setter
def label(self, label):
self._label = label
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Version, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, Version):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c3cb84dbc174f8512a27b82679ac9261528c1fb | 34 | py | Python | core/plugins/transifex/__init__.py | purecloudlabs/translation-process-automation | ea65a5c35a9490bce57e6dc0104b1b86f4fc8ddf | [
"MIT"
] | null | null | null | core/plugins/transifex/__init__.py | purecloudlabs/translation-process-automation | ea65a5c35a9490bce57e6dc0104b1b86f4fc8ddf | [
"MIT"
] | null | null | null | core/plugins/transifex/__init__.py | purecloudlabs/translation-process-automation | ea65a5c35a9490bce57e6dc0104b1b86f4fc8ddf | [
"MIT"
] | null | null | null | """ tpa transifex repository.
"""
| 11.333333 | 29 | 0.647059 | true | true | |
1c3cb88540c2987a48b80aa887651be62cf7e943 | 4,075 | py | Python | travis_pypi_setup.py | BenMusch/s3qlite | a35c0ebc3fe35fffb6770e36e12e791ecd1cc250 | [
"MIT"
] | null | null | null | travis_pypi_setup.py | BenMusch/s3qlite | a35c0ebc3fe35fffb6770e36e12e791ecd1cc250 | [
"MIT"
] | 4 | 2018-02-02T04:32:46.000Z | 2018-02-05T15:16:06.000Z | travis_pypi_setup.py | BenMusch/s3qlite | a35c0ebc3fe35fffb6770e36e12e791ecd1cc250 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Update encrypted deploy password in Travis config file."""
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
# Default repository whose Travis public key is fetched (override with --repo).
GITHUB_REPO = 'benmusch/s3qlite'
# The .travis.yml sitting next to this script is the file that gets rewritten.
TRAVIS_CONFIG_FILE = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
    """Load an RSA public key from its PEM text.

    The Travis API sometimes serves keys with "RSA" in the PEM header/footer,
    which ``cryptography`` rejects, so retry after normalising the markers.

    Read more about RSA encryption with cryptography:
    https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
    """
    try:
        return load_pem_public_key(pubkey.encode(), default_backend())
    except ValueError:
        # workaround for https://github.com/travis-ci/travis-api/issues/196
        normalised = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
        return load_pem_public_key(normalised.encode(), default_backend())
def encrypt(pubkey, password):
    """RSA-encrypt *password* with *pubkey* and return the ciphertext base64-encoded.

    Only the holder of the matching private key (in this case, Travis) can
    decrypt the result.
    """
    rsa_key = load_key(pubkey)
    ciphertext = rsa_key.encrypt(password, PKCS1v15())
    return base64.b64encode(ciphertext)
def fetch_public_key(repo):
    """Download the RSA public key Travis will use for *repo*.

    Travis API docs: http://docs.travis-ci.com/api/#repository-keys
    """
    key_url = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
    payload = json.loads(urlopen(key_url).read().decode())
    if 'key' in payload:
        return payload['key']
    message = "Could not find public key for repo: {}.\n".format(repo)
    message += "Have you already added your GitHub repo to Travis?"
    raise ValueError(message)
def prepend_line(filepath, line):
    """Rewrite *filepath* with *line* inserted before its current content."""
    with open(filepath) as handle:
        contents = handle.readlines()
    with open(filepath, 'w') as handle:
        handle.writelines([line] + contents)
def load_yaml_config(filepath):
    """Load the YAML config file at *filepath* and return the parsed data.

    Uses ``yaml.safe_load``: a bare ``yaml.load`` without an explicit Loader
    is deprecated since PyYAML 5.1 and can instantiate arbitrary Python
    objects from the file. A .travis.yml contains only plain YAML, so the
    safe loader is a drop-in replacement.
    """
    with open(filepath) as f:
        return yaml.safe_load(f)
def save_yaml_config(filepath, config):
    """Serialise *config* as block-style YAML into the file at *filepath*."""
    with open(filepath, 'w') as out:
        yaml.dump(config, out, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
    """Store *encrypted_password* in the deploy section of .travis.yml.

    Also prepends a banner warning that the file is autogenerated.
    """
    config = load_yaml_config(TRAVIS_CONFIG_FILE)
    config['deploy']['password'] = dict(secure=encrypted_password)
    save_yaml_config(TRAVIS_CONFIG_FILE, config)
    banner = ('# This file was autogenerated and will overwrite'
              ' each time you run travis_pypi_setup.py\n')
    prepend_line(TRAVIS_CONFIG_FILE, banner)
def main(args):
    """Add a PyPI password to .travis.yml so that Travis can deploy to PyPI.

    Fetch the Travis public key for the repo, and encrypt the PyPI password
    with it before adding, so that only Travis can decrypt and use the PyPI
    password.

    :param args: parsed argparse namespace with ``repo`` and optional ``password``.
    """
    public_key = fetch_public_key(args.repo)
    # Prompt interactively unless the password was passed on the command line.
    password = args.password or getpass('PyPI password: ')
    update_travis_deploy_password(encrypt(public_key, password.encode()))
    print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
# Command-line interface: both flags are optional; the password is prompted
# for interactively when omitted.
if '__main__' == __name__:
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--repo', default=GITHUB_REPO,
                        help='GitHub repo (default: %s)' % GITHUB_REPO)
    parser.add_argument('--password',
                        help='PyPI password (will prompt if not provided)')
    args = parser.parse_args()
    main(args)
| 31.835938 | 79 | 0.700613 |
from __future__ import print_function
import base64
import json
import os
from getpass import getpass
import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
GITHUB_REPO = 'benmusch/s3qlite'
TRAVIS_CONFIG_FILE = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
try:
return load_pem_public_key(pubkey.encode(), default_backend())
except ValueError:
pubkey = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
return load_pem_public_key(pubkey.encode(), default_backend())
def encrypt(pubkey, password):
key = load_key(pubkey)
encrypted_password = key.encrypt(password, PKCS1v15())
return base64.b64encode(encrypted_password)
def fetch_public_key(repo):
keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
data = json.loads(urlopen(keyurl).read().decode())
if 'key' not in data:
errmsg = "Could not find public key for repo: {}.\n".format(repo)
errmsg += "Have you already added your GitHub repo to Travis?"
raise ValueError(errmsg)
return data['key']
def prepend_line(filepath, line):
with open(filepath) as f:
lines = f.readlines()
lines.insert(0, line)
with open(filepath, 'w') as f:
f.writelines(lines)
def load_yaml_config(filepath):
with open(filepath) as f:
return yaml.load(f)
def save_yaml_config(filepath, config):
with open(filepath, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
config = load_yaml_config(TRAVIS_CONFIG_FILE)
config['deploy']['password'] = dict(secure=encrypted_password)
save_yaml_config(TRAVIS_CONFIG_FILE, config)
line = ('# This file was autogenerated and will overwrite'
' each time you run travis_pypi_setup.py\n')
prepend_line(TRAVIS_CONFIG_FILE, line)
def main(args):
public_key = fetch_public_key(args.repo)
password = args.password or getpass('PyPI password: ')
update_travis_deploy_password(encrypt(public_key, password.encode()))
print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--repo', default=GITHUB_REPO,
help='GitHub repo (default: %s)' % GITHUB_REPO)
parser.add_argument('--password',
help='PyPI password (will prompt if not provided)')
args = parser.parse_args()
main(args)
| true | true |
1c3cb980120189e86874d69bdc3db897df3db062 | 5,153 | py | Python | eval.py | Robert-Hammond/Super-SloMo | 393bfb3ae15a901ad511635f569e409de5c8f5f9 | [
"MIT"
] | 2,754 | 2018-12-27T02:50:33.000Z | 2022-03-30T07:55:38.000Z | eval.py | Robert-Hammond/Super-SloMo | 393bfb3ae15a901ad511635f569e409de5c8f5f9 | [
"MIT"
] | 95 | 2018-12-28T04:31:25.000Z | 2022-03-26T12:20:07.000Z | eval.py | Robert-Hammond/Super-SloMo | 393bfb3ae15a901ad511635f569e409de5c8f5f9 | [
"MIT"
] | 501 | 2018-12-27T07:21:57.000Z | 2022-03-28T05:41:36.000Z | """
Converts a Video to SuperSloMo version
"""
from time import time
import click
import cv2
import torch
from PIL import Image
import numpy as np
import model
from torchvision import transforms
from torch.functional import F
# Inference only: disable autograd globally for the whole script.
torch.set_grad_enabled(False)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Frame <-> tensor conversions; optionally wrapped with (de)normalisation below.
trans_forward = transforms.ToTensor()
trans_backward = transforms.ToPILImage()
# NOTE(review): this compares a torch.device against a str; depending on the
# torch version that comparison may always be unequal, in which case the
# normalisation branch also runs on CPU -- confirm the intent.
if device != "cpu":
    mean = [0.429, 0.431, 0.397]   # channel-wise mean shift (std left at 1)
    mea0 = [-m for m in mean]      # negated mean used to undo the shift
    std = [1] * 3
    trans_forward = transforms.Compose([trans_forward, transforms.Normalize(mean=mean, std=std)])
    trans_backward = transforms.Compose([transforms.Normalize(mean=mea0, std=std), trans_backward])
# Flow-computation and frame-interpolation UNets (weights loaded by load_models()).
flow = model.UNet(6, 4).to(device)
interp = model.UNet(20, 5).to(device)
back_warp = None  # created lazily by setup_back_warp() once the frame size is known
def setup_back_warp(w, h):
    """(Re)create the global backward-warping module for frames of size (w, h)."""
    global back_warp
    # torch.set_grad_enabled returns a context manager, so `with` is valid here.
    with torch.set_grad_enabled(False):
        back_warp = model.backWarp(w, h, device).to(device)
def load_models(checkpoint):
    """Load the flow and interpolation UNet weights from a checkpoint file.

    NOTE(review): torch.load unpickles the file -- only load checkpoints from
    trusted sources.
    """
    # Load onto CPU first; the modules were already moved to `device` at import time.
    states = torch.load(checkpoint, map_location='cpu')
    interp.load_state_dict(states['state_dictAT'])
    flow.load_state_dict(states['state_dictFC'])
def interpolate_batch(frames, factor):
    """Synthesise ``factor - 1`` intermediate frames between each consecutive frame pair.

    :param frames: list of normalised frame tensors (CHW); each adjacent pair
        (frames[k], frames[k+1]) is interpolated.
    :param factor: slow-motion factor; factor-1 frames are produced per pair.
    :return: list of length factor-1; element i is the batch of frames at
        time t = (i+1)/factor for every input pair.
    """
    frame0 = torch.stack(frames[:-1])
    frame1 = torch.stack(frames[1:])
    i0 = frame0.to(device)
    i1 = frame1.to(device)
    ix = torch.cat([i0, i1], dim=1)
    # Bidirectional optical flow predicted by the flow-computation UNet:
    # first two channels 0->1, last two channels 1->0.
    flow_out = flow(ix)
    f01 = flow_out[:, :2, :, :]
    f10 = flow_out[:, 2:, :, :]
    frame_buffer = []
    for i in range(1, factor):
        t = i / factor
        temp = -t * (1 - t)
        co_eff = [temp, t * t, (1 - t) * (1 - t), temp]
        # Approximate intermediate flows F_{t->0} and F_{t->1} by blending the
        # two predicted flows with time-dependent coefficients.
        ft0 = co_eff[0] * f01 + co_eff[1] * f10
        ft1 = co_eff[2] * f01 + co_eff[3] * f10
        gi0ft0 = back_warp(i0, ft0)
        gi1ft1 = back_warp(i1, ft1)
        iy = torch.cat((i0, i1, f01, f10, ft1, ft0, gi1ft1, gi0ft0), dim=1)
        # The interpolation UNet refines the flows (residuals) and predicts a
        # per-pixel visibility map in its fifth output channel.
        io = interp(iy)
        ft0f = io[:, :2, :, :] + ft0
        ft1f = io[:, 2:4, :, :] + ft1
        # F.sigmoid is deprecated in modern PyTorch; torch.sigmoid is identical.
        vt0 = torch.sigmoid(io[:, 4:5, :, :])
        vt1 = 1 - vt0
        gi0ft0f = back_warp(i0, ft0f)
        gi1ft1f = back_warp(i1, ft1f)
        co_eff = [1 - t, t]
        # Visibility-weighted blend of the two warped frames, normalised by
        # the total weight so occluded regions favour the visible source.
        ft_p = (co_eff[0] * vt0 * gi0ft0f + co_eff[1] * vt1 * gi1ft1f) / \
               (co_eff[0] * vt0 + co_eff[1] * vt1)
        frame_buffer.append(ft_p)
    return frame_buffer
def load_batch(video_in, batch_size, batch, w, h):
    """Read up to *batch_size* frames from *video_in*, resized to (w, h) and normalised.

    The last frame of the previous batch is carried over as the first entry so
    consecutive batches overlap by one frame (required for pairwise
    interpolation).

    :param video_in: an opened cv2.VideoCapture.
    :param batch: the previous batch (or [] on the first call).
    :return: list of transformed frame tensors; length 1 signals end of video.
    """
    if len(batch) > 0:
        batch = [batch[-1]]
    for i in range(batch_size):
        ok, frame = video_in.read()
        if not ok:
            break
        # OpenCV decodes to BGR; convert to RGB before handing to PIL.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = Image.fromarray(frame)
        # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS has been the
        # canonical name for the same filter since Pillow 2.7.
        frame = frame.resize((w, h), Image.LANCZOS)
        frame = frame.convert('RGB')
        frame = trans_forward(frame)
        batch.append(frame)
    return batch
def denorm_frame(frame, w0, h0):
    """Convert a model-output tensor back to a BGR uint8 frame at the original size (w0, h0)."""
    frame = frame.cpu()
    frame = trans_backward(frame)
    frame = frame.resize((w0, h0), Image.BILINEAR)
    frame = frame.convert('RGB')
    # PIL yields RGB; OpenCV's VideoWriter expects BGR, hence the channel flip.
    # .copy() makes the array contiguous for cv2.
    return np.array(frame)[:, :, ::-1].copy()
def convert_video(source, dest, factor, batch_size=10, output_format='mp4v', output_fps=30):
    """Generator: write `source` interpolated by `factor` into `dest`, yielding progress.

    Yields (frames_in_batch, frames_done, total_frames) after each processed
    batch so the caller can display progress.
    """
    vin = cv2.VideoCapture(source)
    count = vin.get(cv2.CAP_PROP_FRAME_COUNT)
    w0, h0 = int(vin.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vin.get(cv2.CAP_PROP_FRAME_HEIGHT))
    codec = cv2.VideoWriter_fourcc(*output_format)
    vout = cv2.VideoWriter(dest, codec, float(output_fps), (w0, h0))
    # Process at the nearest multiple-of-32 size (the UNets downsample by 32);
    # frames are scaled back to (w0, h0) on output.
    w, h = (w0 // 32) * 32, (h0 // 32) * 32
    setup_back_warp(w, h)
    done = 0
    batch = []
    while True:
        batch = load_batch(vin, batch_size, batch, w, h)
        if len(batch) == 1:
            # Only the carried-over frame remains: the input video is exhausted.
            break
        done += len(batch) - 1
        intermediate_frames = interpolate_batch(batch, factor)
        # Regroup from [time][pair] to [pair][time] so frames are written in order.
        intermediate_frames = list(zip(*intermediate_frames))
        for fid, iframe in enumerate(intermediate_frames):
            # Original frame followed by its factor-1 interpolated successors.
            vout.write(denorm_frame(batch[fid], w0, h0))
            for frm in iframe:
                vout.write(denorm_frame(frm, w0, h0))
        try:
            yield len(batch), done, count
        except StopIteration:
            break
    # Write the final carried-over frame, then release both video handles.
    vout.write(denorm_frame(batch[0], w0, h0))
    vin.release()
    vout.release()
@click.command('Evaluate Model by converting a low-FPS video to high-fps')
@click.argument('input')
@click.option('--checkpoint', help='Path to model checkpoint')
@click.option('--output', help='Path to output file to save')
@click.option('--batch', default=2, help='Number of frames to process in single forward pass')
@click.option('--scale', default=4, help='Scale Factor of FPS')
@click.option('--fps', default=30, help='FPS of output video')
def main(input, checkpoint, output, batch, scale, fps):
    """CLI entry point: interpolate `input` by `scale`x into `output`, printing live progress."""
    # Running average: avg(old_mean, count, sample) -> (new_mean, count + 1).
    avg = lambda x, n, x0: (x * n/(n+1) + x0 / (n+1), n+1)
    load_models(checkpoint)
    t0 = time()
    n0 = 0
    fpx = 0  # smoothed frames-per-second estimate
    for dl, fd, fc in convert_video(input, output, int(scale), int(batch), output_fps=int(fps)):
        fpx, n0 = avg(fpx, n0, dl / (time() - t0))
        prg = int(100*fd/fc)
        eta = (fc - fd) / fpx
        # '\r' rewrites the same terminal line; trailing spaces clear leftovers.
        print('\rDone: {:03d}% FPS: {:05.2f} ETA: {:.2f}s'.format(prg, fpx, eta) + ' '*5, end='')
        t0 = time()
if __name__ == '__main__':
    main()
| 28.949438 | 99 | 0.605278 | from time import time
import click
import cv2
import torch
from PIL import Image
import numpy as np
import model
from torchvision import transforms
from torch.functional import F
torch.set_grad_enabled(False)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
trans_forward = transforms.ToTensor()
trans_backward = transforms.ToPILImage()
if device != "cpu":
mean = [0.429, 0.431, 0.397]
mea0 = [-m for m in mean]
std = [1] * 3
trans_forward = transforms.Compose([trans_forward, transforms.Normalize(mean=mean, std=std)])
trans_backward = transforms.Compose([transforms.Normalize(mean=mea0, std=std), trans_backward])
flow = model.UNet(6, 4).to(device)
interp = model.UNet(20, 5).to(device)
back_warp = None
def setup_back_warp(w, h):
global back_warp
with torch.set_grad_enabled(False):
back_warp = model.backWarp(w, h, device).to(device)
def load_models(checkpoint):
states = torch.load(checkpoint, map_location='cpu')
interp.load_state_dict(states['state_dictAT'])
flow.load_state_dict(states['state_dictFC'])
def interpolate_batch(frames, factor):
frame0 = torch.stack(frames[:-1])
frame1 = torch.stack(frames[1:])
i0 = frame0.to(device)
i1 = frame1.to(device)
ix = torch.cat([i0, i1], dim=1)
flow_out = flow(ix)
f01 = flow_out[:, :2, :, :]
f10 = flow_out[:, 2:, :, :]
frame_buffer = []
for i in range(1, factor):
t = i / factor
temp = -t * (1 - t)
co_eff = [temp, t * t, (1 - t) * (1 - t), temp]
ft0 = co_eff[0] * f01 + co_eff[1] * f10
ft1 = co_eff[2] * f01 + co_eff[3] * f10
gi0ft0 = back_warp(i0, ft0)
gi1ft1 = back_warp(i1, ft1)
iy = torch.cat((i0, i1, f01, f10, ft1, ft0, gi1ft1, gi0ft0), dim=1)
io = interp(iy)
ft0f = io[:, :2, :, :] + ft0
ft1f = io[:, 2:4, :, :] + ft1
vt0 = F.sigmoid(io[:, 4:5, :, :])
vt1 = 1 - vt0
gi0ft0f = back_warp(i0, ft0f)
gi1ft1f = back_warp(i1, ft1f)
co_eff = [1 - t, t]
ft_p = (co_eff[0] * vt0 * gi0ft0f + co_eff[1] * vt1 * gi1ft1f) / \
(co_eff[0] * vt0 + co_eff[1] * vt1)
frame_buffer.append(ft_p)
return frame_buffer
def load_batch(video_in, batch_size, batch, w, h):
if len(batch) > 0:
batch = [batch[-1]]
for i in range(batch_size):
ok, frame = video_in.read()
if not ok:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = Image.fromarray(frame)
frame = frame.resize((w, h), Image.ANTIALIAS)
frame = frame.convert('RGB')
frame = trans_forward(frame)
batch.append(frame)
return batch
def denorm_frame(frame, w0, h0):
frame = frame.cpu()
frame = trans_backward(frame)
frame = frame.resize((w0, h0), Image.BILINEAR)
frame = frame.convert('RGB')
return np.array(frame)[:, :, ::-1].copy()
def convert_video(source, dest, factor, batch_size=10, output_format='mp4v', output_fps=30):
vin = cv2.VideoCapture(source)
count = vin.get(cv2.CAP_PROP_FRAME_COUNT)
w0, h0 = int(vin.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vin.get(cv2.CAP_PROP_FRAME_HEIGHT))
codec = cv2.VideoWriter_fourcc(*output_format)
vout = cv2.VideoWriter(dest, codec, float(output_fps), (w0, h0))
w, h = (w0 // 32) * 32, (h0 // 32) * 32
setup_back_warp(w, h)
done = 0
batch = []
while True:
batch = load_batch(vin, batch_size, batch, w, h)
if len(batch) == 1:
break
done += len(batch) - 1
intermediate_frames = interpolate_batch(batch, factor)
intermediate_frames = list(zip(*intermediate_frames))
for fid, iframe in enumerate(intermediate_frames):
vout.write(denorm_frame(batch[fid], w0, h0))
for frm in iframe:
vout.write(denorm_frame(frm, w0, h0))
try:
yield len(batch), done, count
except StopIteration:
break
vout.write(denorm_frame(batch[0], w0, h0))
vin.release()
vout.release()
@click.command('Evaluate Model by converting a low-FPS video to high-fps')
@click.argument('input')
@click.option('--checkpoint', help='Path to model checkpoint')
@click.option('--output', help='Path to output file to save')
@click.option('--batch', default=2, help='Number of frames to process in single forward pass')
@click.option('--scale', default=4, help='Scale Factor of FPS')
@click.option('--fps', default=30, help='FPS of output video')
def main(input, checkpoint, output, batch, scale, fps):
avg = lambda x, n, x0: (x * n/(n+1) + x0 / (n+1), n+1)
load_models(checkpoint)
t0 = time()
n0 = 0
fpx = 0
for dl, fd, fc in convert_video(input, output, int(scale), int(batch), output_fps=int(fps)):
fpx, n0 = avg(fpx, n0, dl / (time() - t0))
prg = int(100*fd/fc)
eta = (fc - fd) / fpx
print('\rDone: {:03d}% FPS: {:05.2f} ETA: {:.2f}s'.format(prg, fpx, eta) + ' '*5, end='')
t0 = time()
if __name__ == '__main__':
main()
| true | true |
1c3cb9ecbb3df30bfc9f2d479fbb9378e8a24818 | 4,777 | py | Python | scripts/dht_node.py | nishp77/lbry-sdk | 7531401623a393a1491e3b65de0e2a65f8e45020 | [
"MIT"
] | null | null | null | scripts/dht_node.py | nishp77/lbry-sdk | 7531401623a393a1491e3b65de0e2a65f8e45020 | [
"MIT"
] | null | null | null | scripts/dht_node.py | nishp77/lbry-sdk | 7531401623a393a1491e3b65de0e2a65f8e45020 | [
"MIT"
] | null | null | null | import asyncio
import argparse
import logging
import csv
from io import StringIO
from typing import Optional
from aiohttp import web
from prometheus_client import generate_latest as prom_generate_latest, Gauge
from lbry.dht.constants import generate_id
from lbry.dht.node import Node
from lbry.dht.peer import PeerManager
from lbry.extras.daemon.storage import SQLiteStorage
from lbry.conf import Config
# Root logger configuration for the standalone node process.
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
log = logging.getLogger(__name__)
# Prometheus gauges exported via the /metrics endpoint (see SimpleMetrics).
BLOBS_STORED = Gauge(
    "blobs_stored", "Number of blob info received", namespace="dht_node",
    labelnames=("method",)
)
PEERS = Gauge(
    "known_peers", "Number of peers on routing table", namespace="dht_node",
    labelnames=("method",)
)
class SimpleMetrics:
    """Minimal aiohttp server exposing Prometheus metrics plus CSV dumps of DHT state."""

    def __init__(self, port, node):
        self.prometheus_port = port
        self.dht_node: Node = node

    async def handle_metrics_get_request(self, request: web.Request):
        """Serve the Prometheus text exposition format."""
        try:
            return web.Response(
                text=prom_generate_latest().decode(),
                content_type='text/plain; version=0.0.4'
            )
        except Exception:
            log.exception('could not generate prometheus data')
            raise

    async def handle_peers_csv(self, request: web.Request):
        """Dump the routing table as CSV with columns: ip, port, dht_id."""
        buffer = StringIO()
        csv_writer = csv.DictWriter(buffer, fieldnames=["ip", "port", "dht_id"])
        csv_writer.writeheader()
        for peer in self.dht_node.protocol.routing_table.get_peers():
            csv_writer.writerow(
                {"ip": peer.address, "port": peer.udp_port, "dht_id": peer.node_id.hex()})
        return web.Response(text=buffer.getvalue(), content_type='text/csv')

    async def handle_blobs_csv(self, request: web.Request):
        """Dump the hashes of locally stored blob records as CSV."""
        buffer = StringIO()
        csv_writer = csv.DictWriter(buffer, fieldnames=["blob_hash"])
        csv_writer.writeheader()
        for blob_hash in self.dht_node.protocol.data_store.keys():
            csv_writer.writerow({"blob_hash": blob_hash.hex()})
        return web.Response(text=buffer.getvalue(), content_type='text/csv')

    async def start(self):
        """Bind the metrics HTTP endpoints on 0.0.0.0:<prometheus_port>."""
        app = web.Application()
        app.router.add_get('/metrics', self.handle_metrics_get_request)
        app.router.add_get('/peers.csv', self.handle_peers_csv)
        app.router.add_get('/blobs.csv', self.handle_blobs_csv)
        runner = web.AppRunner(app)
        await runner.setup()
        site = web.TCPSite(runner, "0.0.0.0", self.prometheus_port)
        await site.start()
async def main(host: str, port: int, db_file_path: str, bootstrap_node: Optional[str], prometheus_port: int):
loop = asyncio.get_event_loop()
conf = Config()
storage = SQLiteStorage(conf, db_file_path, loop, loop.time)
if bootstrap_node:
nodes = bootstrap_node.split(':')
nodes = [(nodes[0], int(nodes[1]))]
else:
nodes = conf.known_dht_nodes
await storage.open()
node = Node(
loop, PeerManager(loop), generate_id(), port, port, 3333, None,
storage=storage
)
if prometheus_port > 0:
metrics = SimpleMetrics(prometheus_port, node)
await metrics.start()
node.start(host, nodes)
while True:
await asyncio.sleep(10)
PEERS.labels('main').set(len(node.protocol.routing_table.get_peers()))
BLOBS_STORED.labels('main').set(len(node.protocol.data_store.get_storing_contacts()))
log.info("Known peers: %d. Storing contact information for %d blobs from %d peers.",
len(node.protocol.routing_table.get_peers()), len(node.protocol.data_store),
len(node.protocol.data_store.get_storing_contacts()))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Starts a single DHT node, which then can be used as a seed node or just a contributing node.")
parser.add_argument("--host", default='0.0.0.0', type=str, help="Host to listen for requests. Default: 0.0.0.0")
parser.add_argument("--port", default=4444, type=int, help="Port to listen for requests. Default: 4444")
parser.add_argument("--db_file", default='/tmp/dht.db', type=str, help="DB file to save peers. Default: /tmp/dht.db")
parser.add_argument("--bootstrap_node", default=None, type=str,
help="Node to connect for bootstraping this node. Leave unset to use the default ones. "
"Format: host:port Example: lbrynet1.lbry.com:4444")
parser.add_argument("--metrics_port", default=0, type=int, help="Port for Prometheus and raw CSV metrics. 0 to disable. Default: 0")
args = parser.parse_args()
asyncio.run(main(args.host, args.port, args.db_file, args.bootstrap_node, args.prometheus_port))
| 43.825688 | 136 | 0.675529 | import asyncio
import argparse
import logging
import csv
from io import StringIO
from typing import Optional
from aiohttp import web
from prometheus_client import generate_latest as prom_generate_latest, Gauge
from lbry.dht.constants import generate_id
from lbry.dht.node import Node
from lbry.dht.peer import PeerManager
from lbry.extras.daemon.storage import SQLiteStorage
from lbry.conf import Config
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-4s %(name)s:%(lineno)d: %(message)s")
log = logging.getLogger(__name__)
BLOBS_STORED = Gauge(
"blobs_stored", "Number of blob info received", namespace="dht_node",
labelnames=("method",)
)
PEERS = Gauge(
"known_peers", "Number of peers on routing table", namespace="dht_node",
labelnames=("method",)
)
class SimpleMetrics:
def __init__(self, port, node):
self.prometheus_port = port
self.dht_node: Node = node
async def handle_metrics_get_request(self, request: web.Request):
try:
return web.Response(
text=prom_generate_latest().decode(),
content_type='text/plain; version=0.0.4'
)
except Exception:
log.exception('could not generate prometheus data')
raise
async def handle_peers_csv(self, request: web.Request):
out = StringIO()
writer = csv.DictWriter(out, fieldnames=["ip", "port", "dht_id"])
writer.writeheader()
for peer in self.dht_node.protocol.routing_table.get_peers():
writer.writerow({"ip": peer.address, "port": peer.udp_port, "dht_id": peer.node_id.hex()})
return web.Response(text=out.getvalue(), content_type='text/csv')
async def handle_blobs_csv(self, request: web.Request):
out = StringIO()
writer = csv.DictWriter(out, fieldnames=["blob_hash"])
writer.writeheader()
for blob in self.dht_node.protocol.data_store.keys():
writer.writerow({"blob_hash": blob.hex()})
return web.Response(text=out.getvalue(), content_type='text/csv')
async def start(self):
prom_app = web.Application()
prom_app.router.add_get('/metrics', self.handle_metrics_get_request)
prom_app.router.add_get('/peers.csv', self.handle_peers_csv)
prom_app.router.add_get('/blobs.csv', self.handle_blobs_csv)
metrics_runner = web.AppRunner(prom_app)
await metrics_runner.setup()
prom_site = web.TCPSite(metrics_runner, "0.0.0.0", self.prometheus_port)
await prom_site.start()
async def main(host: str, port: int, db_file_path: str, bootstrap_node: Optional[str], prometheus_port: int):
loop = asyncio.get_event_loop()
conf = Config()
storage = SQLiteStorage(conf, db_file_path, loop, loop.time)
if bootstrap_node:
nodes = bootstrap_node.split(':')
nodes = [(nodes[0], int(nodes[1]))]
else:
nodes = conf.known_dht_nodes
await storage.open()
node = Node(
loop, PeerManager(loop), generate_id(), port, port, 3333, None,
storage=storage
)
if prometheus_port > 0:
metrics = SimpleMetrics(prometheus_port, node)
await metrics.start()
node.start(host, nodes)
while True:
await asyncio.sleep(10)
PEERS.labels('main').set(len(node.protocol.routing_table.get_peers()))
BLOBS_STORED.labels('main').set(len(node.protocol.data_store.get_storing_contacts()))
log.info("Known peers: %d. Storing contact information for %d blobs from %d peers.",
len(node.protocol.routing_table.get_peers()), len(node.protocol.data_store),
len(node.protocol.data_store.get_storing_contacts()))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Starts a single DHT node, which then can be used as a seed node or just a contributing node.")
parser.add_argument("--host", default='0.0.0.0', type=str, help="Host to listen for requests. Default: 0.0.0.0")
parser.add_argument("--port", default=4444, type=int, help="Port to listen for requests. Default: 4444")
parser.add_argument("--db_file", default='/tmp/dht.db', type=str, help="DB file to save peers. Default: /tmp/dht.db")
parser.add_argument("--bootstrap_node", default=None, type=str,
help="Node to connect for bootstraping this node. Leave unset to use the default ones. "
"Format: host:port Example: lbrynet1.lbry.com:4444")
parser.add_argument("--metrics_port", default=0, type=int, help="Port for Prometheus and raw CSV metrics. 0 to disable. Default: 0")
args = parser.parse_args()
asyncio.run(main(args.host, args.port, args.db_file, args.bootstrap_node, args.prometheus_port))
| true | true |
1c3cba20c7bb308310ab68189d20d39b0c185720 | 970 | py | Python | lib/taurus/qt/qtgui/taurusgui/conf/tgconf_macrogui/__init__.py | mrosanes/taurus_deb | 119bf27193af0bbaaececf054eefb78beb6f117a | [
"CC-BY-3.0"
] | 1 | 2016-10-19T13:54:08.000Z | 2016-10-19T13:54:08.000Z | lib/taurus/qt/qtgui/taurusgui/conf/tgconf_macrogui/__init__.py | mrosanes/taurus_deb | 119bf27193af0bbaaececf054eefb78beb6f117a | [
"CC-BY-3.0"
] | 27 | 2016-05-25T08:56:58.000Z | 2019-01-21T09:18:08.000Z | lib/taurus/qt/qtgui/taurusgui/conf/tgconf_macrogui/__init__.py | mrosanes/taurus_deb | 119bf27193af0bbaaececf054eefb78beb6f117a | [
"CC-BY-3.0"
] | 8 | 2015-07-24T09:16:50.000Z | 2018-06-12T12:33:59.000Z | #!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
###########################################################################
from config import *
| 35.925926 | 77 | 0.626804 | true | true | |
1c3cba2b6886a5a8031531b3445f65d1fcd6c618 | 1,869 | py | Python | Study-Basic/005_study_json.py | Cpaul777/Script-Archive | 2c2d99d41206d98486d9aebdf1acbb0c06b4513d | [
"MIT"
] | 4 | 2021-01-28T12:01:08.000Z | 2021-01-28T14:04:45.000Z | Study-Basic/005_study_json.py | Xzlaynveir-Deirdre/Script-Archive | e937129466ebd15272c23df76d3b8a459e62a51d | [
"MIT"
] | null | null | null | Study-Basic/005_study_json.py | Xzlaynveir-Deirdre/Script-Archive | e937129466ebd15272c23df76d3b8a459e62a51d | [
"MIT"
] | 1 | 2021-12-18T11:17:51.000Z | 2021-12-18T11:17:51.000Z | """Link for the source video https://youtu.be/9N6a-VLBa2I"""
import json
SOME_DATA = '''
{
"test_variable": [
{
"test_key_1":"test_value_1",
"test_key_2":"test_vaoue_2",
"test_key_3":"test_value_3"
},
{
"test_key_1":"test_value_4",
"test_key_2":"test_vaoue_5",
"test_key_3":"test_value_6"
}
]
}
'''
print(type(SOME_DATA))
"""FROM JSON TO PYTHON"""
THE_DATA = json.loads(SOME_DATA)
print(THE_DATA)
print(type(THE_DATA), end='\n\n')
"""
Info "loads()"
Coverts json into a python object. Used loads() because it loaded from json to python object.
"""
for test in THE_DATA['test_variable']:
print(test['test_key_1'])
print(THE_DATA, end='\n\n')
"""
Since this is now a python object we can now access it as a dictionary.
"""
"""FROM PYTHON TO JSON"""
NEW_DATA = json.dumps(THE_DATA, indent=2, sort_keys=True)
print(NEW_DATA, end='\n'*6)
"""
Info "dumps()"
Coverts python object into json. Used dumps() because it dumped python object to json.
"""
"""TESTING IT ALL OUT"""
with open('test_json.json') as f:#It is opened as json
data = json.load(f)
#Coverts it to python object
"""
Info
assigning data to a variable so I can edit it later
"""
#Editing data
#Challenge change the key to monkeys and create 3 different monkeys.
#must have name, age, has_banana
needs = ['monkey',['name','age','has_banana']]
monkey = [['John',25,True],['Austin',12,True],['Kiara',16,False]]
del data
data = {needs[0]:[]}
for small_brain in range(0, len(monkey)):
temp_dict = {}
for number, listed in enumerate(monkey[small_brain]):
temp_dict[needs[-1][number]] = listed
data[needs[0]].append(temp_dict)
print(data)
print(type(data))
with open('test_json.json', 'w') as f:
json.dump(data, f, indent=2, sort_keys=True)
"""writing the file so the new things overwrites the old ones"""
| 21.732558 | 93 | 0.663991 |
import json
SOME_DATA = '''
{
"test_variable": [
{
"test_key_1":"test_value_1",
"test_key_2":"test_vaoue_2",
"test_key_3":"test_value_3"
},
{
"test_key_1":"test_value_4",
"test_key_2":"test_vaoue_5",
"test_key_3":"test_value_6"
}
]
}
'''
print(type(SOME_DATA))
THE_DATA = json.loads(SOME_DATA)
print(THE_DATA)
print(type(THE_DATA), end='\n\n')
for test in THE_DATA['test_variable']:
print(test['test_key_1'])
print(THE_DATA, end='\n\n')
NEW_DATA = json.dumps(THE_DATA, indent=2, sort_keys=True)
print(NEW_DATA, end='\n'*6)
with open('test_json.json') as f:
data = json.load(f)
needs = ['monkey',['name','age','has_banana']]
monkey = [['John',25,True],['Austin',12,True],['Kiara',16,False]]
del data
data = {needs[0]:[]}
for small_brain in range(0, len(monkey)):
temp_dict = {}
for number, listed in enumerate(monkey[small_brain]):
temp_dict[needs[-1][number]] = listed
data[needs[0]].append(temp_dict)
print(data)
print(type(data))
with open('test_json.json', 'w') as f:
json.dump(data, f, indent=2, sort_keys=True)
| true | true |
1c3cbd21d51b2ab73a47dbb655c309eb0c3a97d5 | 78,740 | py | Python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_05_10/operations/_resources_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 1 | 2021-09-07T18:35:49.000Z | 2021-09-07T18:35:49.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_05_10/operations/_resources_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2019_05_10/operations/_resources_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 1 | 2019-04-05T18:17:43.000Z | 2019-04-05T18:17:43.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ResourcesOperations(object):
    """ResourcesOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.resource.resources.v2019_05_10.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Class-level alias so callers can reach the versioned model classes
    # (e.g. client.resources.models.ResourcesMoveInfo) without a separate import.
    models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def list_by_resource_group(
        self,
        resource_group_name,  # type: str
        filter=None,  # type: Optional[str]
        expand=None,  # type: Optional[str]
        top=None,  # type: Optional[int]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.ResourceListResult"]
        """Get all the resources for a resource group.
        :param resource_group_name: The resource group with the resources to get.
        :type resource_group_name: str
        :param filter: The filter to apply on the operation.:code:`<br>`:code:`<br>`The properties you
         can use for eq (equals) or ne (not equals) are: location, resourceType, name, resourceGroup,
         identity, identity/principalId, plan, plan/publisher, plan/product, plan/name, plan/version,
         and plan/promotionCode.:code:`<br>`:code:`<br>`For example, to filter by a resource type, use:
         $filter=resourceType eq 'Microsoft.Network/virtualNetworks':code:`<br>`:code:`<br>`You can use
         substringof(value, property) in the filter. The properties you can use for substring are: name
         and resourceGroup.:code:`<br>`:code:`<br>`For example, to get all resources with 'demo'
         anywhere in the name, use: $filter=substringof('demo', name):code:`<br>`:code:`<br>`You can
         link more than one substringof together by adding and/or operators.:code:`<br>`:code:`<br>`You
         can filter by tag names and values. For example, to filter for a tag name and value, use
         $filter=tagName eq 'tag1' and tagValue eq 'Value1':code:`<br>`:code:`<br>`You can use some
         properties together when filtering. The combinations you can use are: substringof and/or
         resourceType, plan and plan/publisher and plan/name, identity and identity/principalId.
        :type filter: str
        :param expand: Comma-separated list of additional properties to be included in the response.
         Valid values include ``createdTime``\ , ``changedTime`` and ``provisioningState``. For example,
         ``$expand=createdTime,changedTime``.
        :type expand: str
        :param top: The number of results to return. If null is passed, returns all resources.
        :type top: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ResourceListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2019_05_10.models.ResourceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ResourceListResult"]
        # Default ARM status-code -> exception mapping; callers may extend or
        # override it by passing their own 'error_map' in kwargs.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-05-10"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request for either the first page (templated URL plus
            # the optional $filter/$expand/$top query options) or a continuation
            # page: next_link is an absolute URL that already carries every query
            # parameter, including api-version, so none are re-added.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and hand ItemPaged the pair
            # (continuation token or None, iterator over the page's items).
            deserialized = self._deserialize('ResourceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page; anything other than HTTP 200 is raised as an error.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        # ItemPaged drives get_next/extract_data lazily as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/resources'}  # type: ignore
def _move_resources_initial(
self,
source_resource_group_name, # type: str
parameters, # type: "_models.ResourcesMoveInfo"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-05-10"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._move_resources_initial.metadata['url'] # type: ignore
path_format_arguments = {
'sourceResourceGroupName': self._serialize.url("source_resource_group_name", source_resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ResourcesMoveInfo')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_move_resources_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources'} # type: ignore
    def begin_move_resources(
        self,
        source_resource_group_name,  # type: str
        parameters,  # type: "_models.ResourcesMoveInfo"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Moves resources from one resource group to another resource group.
        The resources to move must be in the same source resource group. The target resource group may
        be in a different subscription. When moving resources, both the source group and the target
        group are locked for the duration of the operation. Write and delete operations are blocked on
        the groups until the move completes.
        :param source_resource_group_name: The name of the resource group containing the resources to
         move.
        :type source_resource_group_name: str
        :param parameters: Parameters for moving resources.
        :type parameters: ~azure.mgmt.resource.resources.v2019_05_10.models.ResourcesMoveInfo
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: send the initial POST. cls=lambda keeps the raw
            # pipeline response so the poller can read the operation headers.
            raw_result = self._move_resources_initial(
                source_resource_group_name=source_resource_group_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; drop them so they are
        # not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Final deserialization hook: the operation has no body, so only a
            # caller-provided cls produces a value.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'sourceResourceGroupName': self._serialize.url("source_resource_group_name", source_resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new operation.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_move_resources.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources'}  # type: ignore
def _validate_move_resources_initial(
self,
source_resource_group_name, # type: str
parameters, # type: "_models.ResourcesMoveInfo"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-05-10"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._validate_move_resources_initial.metadata['url'] # type: ignore
path_format_arguments = {
'sourceResourceGroupName': self._serialize.url("source_resource_group_name", source_resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ResourcesMoveInfo')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204, 409]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_validate_move_resources_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/validateMoveResources'} # type: ignore
    def begin_validate_move_resources(
        self,
        source_resource_group_name,  # type: str
        parameters,  # type: "_models.ResourcesMoveInfo"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Validates whether resources can be moved from one resource group to another resource group.
        This operation checks whether the specified resources can be moved to the target. The resources
        to move must be in the same source resource group. The target resource group may be in a
        different subscription. If validation succeeds, it returns HTTP response code 204 (no content).
        If validation fails, it returns HTTP response code 409 (Conflict) with an error message.
        Retrieve the URL in the Location header value to check the result of the long-running
        operation.
        :param source_resource_group_name: The name of the resource group containing the resources to
         validate for move.
        :type source_resource_group_name: str
        :param parameters: Parameters for moving resources.
        :type parameters: ~azure.mgmt.resource.resources.v2019_05_10.models.ResourcesMoveInfo
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh operation: send the initial POST. cls=lambda keeps the raw
            # pipeline response so the poller can read the operation headers.
            raw_result = self._validate_move_resources_initial(
                source_resource_group_name=source_resource_group_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; drop them so they are
        # not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Final deserialization hook: the operation has no body, so only a
            # caller-provided cls produces a value.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'sourceResourceGroupName': self._serialize.url("source_resource_group_name", source_resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new operation.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_validate_move_resources.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/validateMoveResources'}  # type: ignore
def list(
self,
filter=None, # type: Optional[str]
expand=None, # type: Optional[str]
top=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ResourceListResult"]
"""Get all the resources in a subscription.
:param filter: The filter to apply on the operation.:code:`<br>`:code:`<br>`The properties you
can use for eq (equals) or ne (not equals) are: location, resourceType, name, resourceGroup,
identity, identity/principalId, plan, plan/publisher, plan/product, plan/name, plan/version,
and plan/promotionCode.:code:`<br>`:code:`<br>`For example, to filter by a resource type, use:
$filter=resourceType eq 'Microsoft.Network/virtualNetworks':code:`<br>`:code:`<br>`You can use
substringof(value, property) in the filter. The properties you can use for substring are: name
and resourceGroup.:code:`<br>`:code:`<br>`For example, to get all resources with 'demo'
anywhere in the name, use: $filter=substringof('demo', name):code:`<br>`:code:`<br>`You can
link more than one substringof together by adding and/or operators.:code:`<br>`:code:`<br>`You
can filter by tag names and values. For example, to filter for a tag name and value, use
$filter=tagName eq 'tag1' and tagValue eq 'Value1':code:`<br>`:code:`<br>`You can use some
properties together when filtering. The combinations you can use are: substringof and/or
resourceType, plan and plan/publisher and plan/name, identity and identity/principalId.
:type filter: str
:param expand: Comma-separated list of additional properties to be included in the response.
Valid values include ``createdTime``\ , ``changedTime`` and ``provisioningState``. For example,
``$expand=createdTime,changedTime``.
:type expand: str
:param top: The number of results to return. If null is passed, returns all resource groups.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ResourceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.resources.v2019_05_10.models.ResourceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-05-10"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ResourceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resources'} # type: ignore
def check_existence(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
api_version, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Checks whether a resource exists.
:param resource_group_name: The name of the resource group containing the resource to check.
The name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The resource provider of the resource to check.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type.
:type resource_type: str
:param resource_name: The name of the resource to check whether it exists.
:type resource_name: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.check_existence.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
def _delete_initial(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
api_version, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name, # type: str
        resource_provider_namespace, # type: str
        parent_resource_path, # type: str
        resource_type, # type: str
        resource_name, # type: str
        api_version, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes a resource.

        Long-running operation: sends the initial DELETE via ``_delete_initial``
        and returns an ``LROPoller`` that tracks it to completion.

        :param resource_group_name: The name of the resource group that contains the resource to
         delete. The name is case insensitive.
        :type resource_group_name: str
        :param resource_provider_namespace: The namespace of the resource provider.
        :type resource_provider_namespace: str
        :param parent_resource_path: The parent resource identity.
        :type parent_resource_path: str
        :param resource_type: The resource type.
        :type resource_type: str
        :param resource_name: The name of the resource to delete.
        :type resource_name: str
        :param api_version: The API version to use for the operation.
        :type api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                resource_provider_namespace=resource_provider_namespace,
                parent_resource_path=parent_resource_path,
                resource_type=resource_type,
                resource_name=resource_name,
                api_version=api_version,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        # These kwargs were consumed by the initial call; drop them so they are
        # not passed to the polling method as well.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Final callback: delete returns no body, so only the optional
            # custom deserializer produces a value.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
            'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
            'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> default ARM polling; False -> fire-and-forget; else a
        # caller-supplied PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
api_version, # type: str
parameters, # type: "_models.GenericResource"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.GenericResource"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'GenericResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name, # type: str
        resource_provider_namespace, # type: str
        parent_resource_path, # type: str
        resource_type, # type: str
        resource_name, # type: str
        api_version, # type: str
        parameters, # type: "_models.GenericResource"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.GenericResource"]
        """Creates a resource.

        Long-running operation: sends the initial PUT via
        ``_create_or_update_initial`` and returns an ``LROPoller`` that resolves
        to the created/updated ``GenericResource``.

        :param resource_group_name: The name of the resource group for the resource. The name is case
         insensitive.
        :type resource_group_name: str
        :param resource_provider_namespace: The namespace of the resource provider.
        :type resource_provider_namespace: str
        :param parent_resource_path: The parent resource identity.
        :type parent_resource_path: str
        :param resource_type: The resource type of the resource to create.
        :type resource_type: str
        :param resource_name: The name of the resource to create.
        :type resource_name: str
        :param api_version: The API version to use for the operation.
        :type api_version: str
        :param parameters: Parameters for creating or updating the resource.
        :type parameters: ~azure.mgmt.resource.resources.v2019_05_10.models.GenericResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either GenericResource or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.resource.resources.v2019_05_10.models.GenericResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                resource_provider_namespace=resource_provider_namespace,
                parent_resource_path=parent_resource_path,
                resource_type=resource_type,
                resource_name=resource_name,
                api_version=api_version,
                parameters=parameters,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        # These kwargs were consumed by the initial call; drop them so they are
        # not passed to the polling method as well.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Final callback: deserialize the terminal response body into a model.
            deserialized = self._deserialize('GenericResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
            'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
            'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> default ARM polling; False -> fire-and-forget; else a
        # caller-supplied PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
api_version, # type: str
parameters, # type: "_models.GenericResource"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.GenericResource"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'GenericResource')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
    def begin_update(
        self,
        resource_group_name, # type: str
        resource_provider_namespace, # type: str
        parent_resource_path, # type: str
        resource_type, # type: str
        resource_name, # type: str
        api_version, # type: str
        parameters, # type: "_models.GenericResource"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.GenericResource"]
        """Updates a resource.

        Long-running operation: sends the initial PATCH via ``_update_initial``
        and returns an ``LROPoller`` that resolves to the updated
        ``GenericResource``.

        :param resource_group_name: The name of the resource group for the resource. The name is case
         insensitive.
        :type resource_group_name: str
        :param resource_provider_namespace: The namespace of the resource provider.
        :type resource_provider_namespace: str
        :param parent_resource_path: The parent resource identity.
        :type parent_resource_path: str
        :param resource_type: The resource type of the resource to update.
        :type resource_type: str
        :param resource_name: The name of the resource to update.
        :type resource_name: str
        :param api_version: The API version to use for the operation.
        :type api_version: str
        :param parameters: Parameters for updating the resource.
        :type parameters: ~azure.mgmt.resource.resources.v2019_05_10.models.GenericResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either GenericResource or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.resource.resources.v2019_05_10.models.GenericResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial PATCH when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._update_initial(
                resource_group_name=resource_group_name,
                resource_provider_namespace=resource_provider_namespace,
                parent_resource_path=parent_resource_path,
                resource_type=resource_type,
                resource_name=resource_name,
                api_version=api_version,
                parameters=parameters,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        # These kwargs were consumed by the initial call; drop them so they are
        # not passed to the polling method as well.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Final callback: deserialize the terminal response body into a model.
            deserialized = self._deserialize('GenericResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
            'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
            'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling=True -> default ARM polling; False -> fire-and-forget; else a
        # caller-supplied PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
resource_provider_namespace, # type: str
parent_resource_path, # type: str
resource_type, # type: str
resource_name, # type: str
api_version, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.GenericResource"
"""Gets a resource.
:param resource_group_name: The name of the resource group containing the resource to get. The
name is case insensitive.
:type resource_group_name: str
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param parent_resource_path: The parent resource identity.
:type parent_resource_path: str
:param resource_type: The resource type of the resource.
:type resource_type: str
:param resource_name: The name of the resource to get.
:type resource_name: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GenericResource, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_05_10.models.GenericResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'} # type: ignore
def check_existence_by_id(
self,
resource_id, # type: str
api_version, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Checks by ID whether a resource exists.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self.check_existence_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
check_existence_by_id.metadata = {'url': '/{resourceId}'} # type: ignore
def _delete_by_id_initial(
self,
resource_id, # type: str
api_version, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
# Construct URL
url = self._delete_by_id_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_by_id_initial.metadata = {'url': '/{resourceId}'} # type: ignore
    def begin_delete_by_id(
        self,
        resource_id, # type: str
        api_version, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes a resource by ID.

        :param resource_id: The fully qualified ID of the resource, including the resource name and
         resource type. Use the format,
         /subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
        :type resource_id: str
        :param api_version: The API version to use for the operation.
        :type api_version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # First call: fire the initial DELETE. cls is overridden so the raw
            # PipelineResponse is returned for the poller to work from.
            raw_result = self._delete_by_id_initial(
                resource_id=resource_id,
                api_version=api_version,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call and must not leak
        # into the polling requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deletion returns no body; only invoke the user's cls hook, if any.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
        }
        # Select the polling strategy: default ARM polling, no polling, or a
        # caller-supplied PollingMethod instance.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new operation.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete_by_id.metadata = {'url': '/{resourceId}'} # type: ignore
    def _create_or_update_by_id_initial(
        self,
        resource_id, # type: str
        api_version, # type: str
        parameters, # type: "_models.GenericResource"
        **kwargs # type: Any
    ):
        # type: (...) -> Optional["_models.GenericResource"]
        """Initial PUT of the create-or-update-by-ID long-running operation.

        Sends the request once; :meth:`begin_create_or_update_by_id` drives the
        polling. Returns the deserialized resource for 200/201 responses and
        ``None`` for 202 (operation still in progress).
        """
        cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_by_id_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            # resource_id is a full ARM path; skip URL quoting.
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'GenericResource')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        # 200 (updated) and 201 (created) carry the resource body; 202 does not.
        if response.status_code == 200:
            deserialized = self._deserialize('GenericResource', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('GenericResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_by_id_initial.metadata = {'url': '/{resourceId}'} # type: ignore
    def begin_create_or_update_by_id(
        self,
        resource_id, # type: str
        api_version, # type: str
        parameters, # type: "_models.GenericResource"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.GenericResource"]
        """Create a resource by ID.

        :param resource_id: The fully qualified ID of the resource, including the resource name and
         resource type. Use the format,
         /subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
        :type resource_id: str
        :param api_version: The API version to use for the operation.
        :type api_version: str
        :param parameters: Create or update resource parameters.
        :type parameters: ~azure.mgmt.resource.resources.v2019_05_10.models.GenericResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either GenericResource or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.resource.resources.v2019_05_10.models.GenericResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # First call: fire the initial PUT; cls is overridden so the raw
            # PipelineResponse is handed to the poller.
            raw_result = self._create_or_update_by_id_initial(
                resource_id=resource_id,
                api_version=api_version,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; must not leak into polling requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final resource once polling completes.
            deserialized = self._deserialize('GenericResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
        }
        # Default ARM polling, no polling, or a caller-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume from a saved poller state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update_by_id.metadata = {'url': '/{resourceId}'} # type: ignore
    def _update_by_id_initial(
        self,
        resource_id, # type: str
        api_version, # type: str
        parameters, # type: "_models.GenericResource"
        **kwargs # type: Any
    ):
        # type: (...) -> Optional["_models.GenericResource"]
        """Initial PATCH of the update-by-ID long-running operation.

        Sends the request once; :meth:`begin_update_by_id` drives the polling.
        Returns the deserialized resource for a 200 response and ``None`` for
        202 (operation still in progress).
        """
        cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.GenericResource"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_by_id_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            # resource_id is a full ARM path; skip URL quoting.
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'GenericResource')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('GenericResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_by_id_initial.metadata = {'url': '/{resourceId}'} # type: ignore
    def begin_update_by_id(
        self,
        resource_id, # type: str
        api_version, # type: str
        parameters, # type: "_models.GenericResource"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["_models.GenericResource"]
        """Updates a resource by ID.

        :param resource_id: The fully qualified ID of the resource, including the resource name and
         resource type. Use the format,
         /subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
        :type resource_id: str
        :param api_version: The API version to use for the operation.
        :type api_version: str
        :param parameters: Update resource parameters.
        :type parameters: ~azure.mgmt.resource.resources.v2019_05_10.models.GenericResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the ARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either GenericResource or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.resource.resources.v2019_05_10.models.GenericResource]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # First call: fire the initial PATCH; cls is overridden so the raw
            # PipelineResponse is handed to the poller.
            raw_result = self._update_by_id_initial(
                resource_id=resource_id,
                api_version=api_version,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; must not leak into polling requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final resource once polling completes.
            deserialized = self._deserialize('GenericResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
        }
        # Default ARM polling, no polling, or a caller-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume from a saved poller state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_by_id.metadata = {'url': '/{resourceId}'} # type: ignore
def get_by_id(
self,
resource_id, # type: str
api_version, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.GenericResource"
"""Gets a resource by ID.
:param resource_id: The fully qualified ID of the resource, including the resource name and
resource type. Use the format,
/subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name}.
:type resource_id: str
:param api_version: The API version to use for the operation.
:type api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GenericResource, or the result of cls(response)
:rtype: ~azure.mgmt.resource.resources.v2019_05_10.models.GenericResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GenericResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
accept = "application/json"
# Construct URL
url = self.get_by_id.metadata['url'] # type: ignore
path_format_arguments = {
'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GenericResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_id.metadata = {'url': '/{resourceId}'} # type: ignore
| 51.12987 | 223 | 0.660859 |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ResourcesOperations(object):
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def list_by_resource_group(
        self,
        resource_group_name,
        filter=None,
        expand=None,
        top=None,
        **kwargs
    ):
        """Get all the resources for a resource group, as a lazily-fetched paged iterable.

        :param resource_group_name: The resource group whose resources to list.
        :param filter: Optional OData ``$filter`` expression restricting the results.
        :param expand: Optional ``$expand`` value — presumably a comma-separated
         list of extra properties to include; confirm against the service docs.
        :param top: Optional maximum number of results; all results when None.
        :return: An iterator-like instance of paged results (ResourceListResult pages).
        :raises ~azure.core.exceptions.HttpResponseError: If any page request fails.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-05-10"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the first-page request, or a follow-up request to the
            # server-supplied next_link (which already embeds its query string).
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list_by_resource_group.metadata['url']
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation link, item iterator).
            deserialized = self._deserialize('ResourceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page, translating HTTP errors into exceptions.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/resources'}
    def _move_resources_initial(
        self,
        source_resource_group_name,
        parameters,
        **kwargs
    ):
        """Initial POST of the move-resources long-running operation.

        Issues the request once; :meth:`begin_move_resources` drives the polling.

        :param source_resource_group_name: Name of the resource group containing
         the resources to move.
        :param parameters: ResourcesMoveInfo payload (resources plus target).
        :raises ~azure.core.exceptions.HttpResponseError: On any status other
         than 202 or 204.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-05-10"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self._move_resources_initial.metadata['url']
        path_format_arguments = {
            'sourceResourceGroupName': self._serialize.url("source_resource_group_name", source_resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers and body
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        body_content_kwargs = {}
        body_content = self._serialize.body(parameters, 'ResourcesMoveInfo')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _move_resources_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources'}
    def begin_move_resources(
        self,
        source_resource_group_name,
        parameters,
        **kwargs
    ):
        """Moves resources from one resource group to another, as a long-running operation.

        :param source_resource_group_name: Name of the resource group containing
         the resources to move.
        :param parameters: ResourcesMoveInfo payload (resources plus target).
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARM polling (default), False for no polling,
         or a PollingMethod instance for a custom strategy.
        :keyword int polling_interval: Default waiting time between two polls
         when no Retry-After header is present.
        :return: An LROPoller that returns None (or cls(response)).
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # First call: fire the initial POST; cls is overridden so the raw
            # PipelineResponse is handed to the poller.
            raw_result = self._move_resources_initial(
                source_resource_group_name=source_resource_group_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; must not leak into polling requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # A move produces no body; only invoke the user's cls hook, if any.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'sourceResourceGroupName': self._serialize.url("source_resource_group_name", source_resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Default ARM polling, no polling, or a caller-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume from a saved poller state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_move_resources.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources'}
    def _validate_move_resources_initial(
        self,
        source_resource_group_name,
        parameters,
        **kwargs
    ):
        """Initial POST of the validate-move-resources long-running operation.

        :param source_resource_group_name: Name of the resource group containing
         the resources whose move is being validated.
        :param parameters: ResourcesMoveInfo payload (resources plus target).
        :raises ~azure.core.exceptions.HttpResponseError: On any status other
         than 202, 204 or 409.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-05-10"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self._validate_move_resources_initial.metadata['url']
        path_format_arguments = {
            'sourceResourceGroupName': self._serialize.url("source_resource_group_name", source_resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers and body
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        body_content_kwargs = {}
        body_content = self._serialize.body(parameters, 'ResourcesMoveInfo')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # NOTE(review): unlike _move_resources_initial, 409 is in the accepted
        # list here — presumably a validation conflict is surfaced through the
        # LRO result rather than raised immediately; confirm against the service docs.
        if response.status_code not in [202, 204, 409]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _validate_move_resources_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/validateMoveResources'}
    def begin_validate_move_resources(
        self,
        source_resource_group_name,
        parameters,
        **kwargs
    ):
        """Validates a resources-move request, as a long-running operation.

        :param source_resource_group_name: Name of the resource group containing
         the resources whose move is being validated.
        :param parameters: ResourcesMoveInfo payload (resources plus target).
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARM polling (default), False for no polling,
         or a PollingMethod instance for a custom strategy.
        :keyword int polling_interval: Default waiting time between two polls
         when no Retry-After header is present.
        :return: An LROPoller that returns None (or cls(response)).
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # First call: fire the initial POST; cls is overridden so the raw
            # PipelineResponse is handed to the poller.
            raw_result = self._validate_move_resources_initial(
                source_resource_group_name=source_resource_group_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; must not leak into polling requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # No response body; only invoke the user's cls hook, if any.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'sourceResourceGroupName': self._serialize.url("source_resource_group_name", source_resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Default ARM polling, no polling, or a caller-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume from a saved poller state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_validate_move_resources.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/validateMoveResources'}
    def list(
        self,
        filter=None,
        expand=None,
        top=None,
        **kwargs
    ):
        """Get all the resources in the subscription, as a lazily-fetched paged iterable.

        :param filter: Optional OData ``$filter`` expression restricting the results.
        :param expand: Optional ``$expand`` value — presumably a comma-separated
         list of extra properties to include; confirm against the service docs.
        :param top: Optional maximum number of results; all results when None.
        :return: An iterator-like instance of paged results (ResourceListResult pages).
        :raises ~azure.core.exceptions.HttpResponseError: If any page request fails.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-05-10"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the first-page request, or a follow-up request to the
            # server-supplied next_link (which already embeds its query string).
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if expand is not None:
                    query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation link, item iterator).
            deserialized = self._deserialize('ResourceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page, translating HTTP errors into exceptions.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resources'}
    def check_existence(
        self,
        resource_group_name,
        resource_provider_namespace,
        parent_resource_path,
        resource_type,
        resource_name,
        api_version,
        **kwargs
    ):
        """Checks whether a resource exists.

        Issues a HEAD request; 204 and 404 are the two expected answers.

        :param resource_group_name: The resource group of the resource to check.
        :param resource_provider_namespace: The resource provider namespace.
        :param parent_resource_path: The parent resource identity (not URL-quoted).
        :param resource_type: The resource type (not URL-quoted).
        :param resource_name: The name of the resource to check.
        :param api_version: The API version to use for the operation.
        :return: True when the resource exists (204), False when it does not (404).
        :raises ~azure.core.exceptions.HttpResponseError: On any other status code.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Construct URL
        url = self.check_existence.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
            'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
            'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        request = self._client.head(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 204 (exists) and 404 (does not exist) are both valid outcomes here.
        if response.status_code not in [204, 404]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
        # Only 204 falls in the 2xx range, so this is True exactly when the
        # resource exists.
        return 200 <= response.status_code <= 299
    check_existence.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'}
    def _delete_initial(
        self,
        resource_group_name,
        resource_provider_namespace,
        parent_resource_path,
        resource_type,
        resource_name,
        api_version,
        **kwargs
    ):
        """Initial (non-polling) call of the delete long-running operation.

        Issues the DELETE request once and validates the status code; polling to
        completion is driven by :meth:`begin_delete`.

        :raises ~azure.core.exceptions.HttpResponseError: On any status other
         than 200, 202 or 204.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Construct URL
        url = self._delete_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
            'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
            'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'}
    def begin_delete(
        self,
        resource_group_name,
        resource_provider_namespace,
        parent_resource_path,
        resource_type,
        resource_name,
        api_version,
        **kwargs
    ):
        """Deletes a resource, as a long-running operation.

        :param resource_group_name: The resource group of the resource to delete.
        :param resource_provider_namespace: The resource provider namespace.
        :param parent_resource_path: The parent resource identity.
        :param resource_type: The resource type.
        :param resource_name: The name of the resource to delete.
        :param api_version: The API version to use for the operation.
        :keyword callable cls: A custom type or function that will be passed the direct response.
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARM polling (default), False for no polling,
         or a PollingMethod instance for a custom strategy.
        :keyword int polling_interval: Default waiting time between two polls
         when no Retry-After header is present.
        :return: An LROPoller that returns None (or cls(response)).
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # First call: fire the initial DELETE; cls is overridden so the raw
            # PipelineResponse is handed to the poller.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                resource_provider_namespace=resource_provider_namespace,
                parent_resource_path=parent_resource_path,
                resource_type=resource_type,
                resource_name=resource_name,
                api_version=api_version,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; must not leak into polling requests.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deletion returns no body; only invoke the user's cls hook, if any.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
            'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
            'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Default ARM polling, no polling, or a caller-supplied PollingMethod.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume from a saved poller state.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'}
    def _create_or_update_initial(
        self,
        resource_group_name,
        resource_provider_namespace,
        parent_resource_path,
        resource_type,
        resource_name,
        api_version,
        parameters,
        **kwargs
    ):
        """Send the initial PUT of the create-or-update LRO.

        Returns the deserialized ``GenericResource`` for 200/201 responses
        (202 carries no body here); raises :class:`HttpResponseError` for any
        other status.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct the URL from this method's route template.
        url = self._create_or_update_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
            'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
            'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body from the GenericResource model.
        body_content_kwargs = {}
        body_content = self._serialize.body(parameters, 'GenericResource')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('GenericResource', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('GenericResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'}
    def begin_create_or_update(
        self,
        resource_group_name,
        resource_provider_namespace,
        parent_resource_path,
        resource_type,
        resource_name,
        api_version,
        parameters,
        **kwargs
    ):
        """Start creating or updating a resource as a long-running operation.

        Sends the initial PUT via :meth:`_create_or_update_initial`; the
        returned :class:`LROPoller` resolves to the final ``GenericResource``.

        Recognized kwargs: ``polling``, ``polling_interval``,
        ``continuation_token`` and ``cls`` (see :meth:`begin_delete`).
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # No continuation token: actually send the initial PUT request.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                resource_provider_namespace=resource_provider_namespace,
                parent_resource_path=parent_resource_path,
                resource_type=resource_type,
                resource_name=resource_name,
                api_version=api_version,
                parameters=parameters,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Final LRO payload is the resulting resource.
            deserialized = self._deserialize('GenericResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
            'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
            'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'}
    def _update_initial(
        self,
        resource_group_name,
        resource_provider_namespace,
        parent_resource_path,
        resource_type,
        resource_name,
        api_version,
        parameters,
        **kwargs
    ):
        """Send the initial PATCH of the update LRO.

        Returns the deserialized ``GenericResource`` for a 200 response (202
        carries no body here); raises :class:`HttpResponseError` for any
        other status.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct the URL from this method's route template.
        url = self._update_initial.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
            'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
            'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body from the GenericResource model.
        body_content_kwargs = {}
        body_content = self._serialize.body(parameters, 'GenericResource')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('GenericResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'}
    def begin_update(
        self,
        resource_group_name,
        resource_provider_namespace,
        parent_resource_path,
        resource_type,
        resource_name,
        api_version,
        parameters,
        **kwargs
    ):
        """Start updating (PATCH) a resource as a long-running operation.

        Sends the initial PATCH via :meth:`_update_initial`; the returned
        :class:`LROPoller` resolves to the updated ``GenericResource``.

        Recognized kwargs: ``polling``, ``polling_interval``,
        ``continuation_token`` and ``cls`` (see :meth:`begin_delete`).
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # No continuation token: actually send the initial PATCH request.
            raw_result = self._update_initial(
                resource_group_name=resource_group_name,
                resource_provider_namespace=resource_provider_namespace,
                parent_resource_path=parent_resource_path,
                resource_type=resource_type,
                resource_name=resource_name,
                api_version=api_version,
                parameters=parameters,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Final LRO payload is the updated resource.
            deserialized = self._deserialize('GenericResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
            'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
            'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'}
    def get(
        self,
        resource_group_name,
        resource_provider_namespace,
        parent_resource_path,
        resource_type,
        resource_name,
        api_version,
        **kwargs
    ):
        """GET a single resource and return it as a ``GenericResource``.

        Raises :class:`HttpResponseError` for any non-200 status; the
        optional ``cls`` kwarg customizes the returned value.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        # Construct the URL from this method's route template.
        url = self.get.metadata['url']
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
            'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
            'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('GenericResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}'}
    def check_existence_by_id(
        self,
        resource_id,
        api_version,
        **kwargs
    ):
        """HEAD a resource by its full ARM ID to test whether it exists.

        Returns True when the service answers 204 (resource present) and
        False for 404 (absent); any other status raises
        :class:`HttpResponseError`.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        url = self.check_existence_by_id.metadata['url']
        path_format_arguments = {
            # skip_quote: resource_id is a full ARM ID and already contains '/'.
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        request = self._client.head(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 404 is an expected outcome here (resource missing), not an error.
        if response.status_code not in [204, 404]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
        # Of the accepted codes, only 204 falls in the 2xx range.
        return 200 <= response.status_code <= 299
    check_existence_by_id.metadata = {'url': '/{resourceId}'}
    def _delete_by_id_initial(
        self,
        resource_id,
        api_version,
        **kwargs
    ):
        """Send the initial DELETE (by full ARM ID) of the delete LRO.

        Returns None (or the ``cls`` callback result); raises
        :class:`HttpResponseError` for statuses other than 200/202/204.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        url = self._delete_by_id_initial.metadata['url']
        path_format_arguments = {
            # skip_quote: resource_id is a full ARM ID and already contains '/'.
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_by_id_initial.metadata = {'url': '/{resourceId}'}
    def begin_delete_by_id(
        self,
        resource_id,
        api_version,
        **kwargs
    ):
        """Start deleting a resource (by full ARM ID) as a long-running operation.

        Sends the initial DELETE via :meth:`_delete_by_id_initial`, then
        returns an :class:`LROPoller` tracking the operation.

        Recognized kwargs: ``polling``, ``polling_interval``,
        ``continuation_token`` and ``cls`` (see :meth:`begin_delete`).
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # No continuation token: actually send the initial DELETE request.
            raw_result = self._delete_by_id_initial(
                resource_id=resource_id,
                api_version=api_version,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # DELETE returns no body; only invoke the custom callback if given.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete_by_id.metadata = {'url': '/{resourceId}'}
    def _create_or_update_by_id_initial(
        self,
        resource_id,
        api_version,
        parameters,
        **kwargs
    ):
        """Send the initial PUT (by full ARM ID) of the create-or-update LRO.

        Returns the deserialized ``GenericResource`` for 200/201 responses
        (202 carries no body here); raises :class:`HttpResponseError`
        otherwise.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        url = self._create_or_update_by_id_initial.metadata['url']
        path_format_arguments = {
            # skip_quote: resource_id is a full ARM ID and already contains '/'.
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body from the GenericResource model.
        body_content_kwargs = {}
        body_content = self._serialize.body(parameters, 'GenericResource')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('GenericResource', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('GenericResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_by_id_initial.metadata = {'url': '/{resourceId}'}
    def begin_create_or_update_by_id(
        self,
        resource_id,
        api_version,
        parameters,
        **kwargs
    ):
        """Start creating or updating a resource (by full ARM ID) as an LRO.

        Sends the initial PUT via :meth:`_create_or_update_by_id_initial`;
        the returned :class:`LROPoller` resolves to the final
        ``GenericResource``.

        Recognized kwargs: ``polling``, ``polling_interval``,
        ``continuation_token`` and ``cls`` (see :meth:`begin_delete`).
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # No continuation token: actually send the initial PUT request.
            raw_result = self._create_or_update_by_id_initial(
                resource_id=resource_id,
                api_version=api_version,
                parameters=parameters,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Final LRO payload is the resulting resource.
            deserialized = self._deserialize('GenericResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update_by_id.metadata = {'url': '/{resourceId}'}
    def _update_by_id_initial(
        self,
        resource_id,
        api_version,
        parameters,
        **kwargs
    ):
        """Send the initial PATCH (by full ARM ID) of the update LRO.

        Returns the deserialized ``GenericResource`` for a 200 response (202
        carries no body here); raises :class:`HttpResponseError` otherwise.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        url = self._update_by_id_initial.metadata['url']
        path_format_arguments = {
            # skip_quote: resource_id is a full ARM ID and already contains '/'.
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body from the GenericResource model.
        body_content_kwargs = {}
        body_content = self._serialize.body(parameters, 'GenericResource')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('GenericResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_by_id_initial.metadata = {'url': '/{resourceId}'}
    def begin_update_by_id(
        self,
        resource_id,
        api_version,
        parameters,
        **kwargs
    ):
        """Start updating (PATCH) a resource (by full ARM ID) as an LRO.

        Sends the initial PATCH via :meth:`_update_by_id_initial`; the
        returned :class:`LROPoller` resolves to the updated
        ``GenericResource``.

        Recognized kwargs: ``polling``, ``polling_interval``,
        ``continuation_token`` and ``cls`` (see :meth:`begin_delete`).
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # No continuation token: actually send the initial PATCH request.
            raw_result = self._update_by_id_initial(
                resource_id=resource_id,
                api_version=api_version,
                parameters=parameters,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Final LRO payload is the updated resource.
            deserialized = self._deserialize('GenericResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_by_id.metadata = {'url': '/{resourceId}'}
    def get_by_id(
        self,
        resource_id,
        api_version,
        **kwargs
    ):
        """GET a resource by its full ARM ID and return a ``GenericResource``.

        Raises :class:`HttpResponseError` for any non-200 status; the
        optional ``cls`` kwarg customizes the returned value.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        url = self.get_by_id.metadata['url']
        path_format_arguments = {
            # skip_quote: resource_id is a full ARM ID and already contains '/'.
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('GenericResource', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_by_id.metadata = {'url': '/{resourceId}'}
| true | true |
1c3cbd32e097be9b631ffcf635e4b4af3d1bc5b0 | 2,707 | py | Python | tests/gold_tests/tls_hooks/tls_hooks15.test.py | cmcfarlen/trafficserver | 2aa1d3106398eb082e5a454212b0273c63d5f69d | [
"Apache-2.0"
] | 1,351 | 2015-01-03T08:25:40.000Z | 2022-03-31T09:14:08.000Z | tests/gold_tests/tls_hooks/tls_hooks15.test.py | cmcfarlen/trafficserver | 2aa1d3106398eb082e5a454212b0273c63d5f69d | [
"Apache-2.0"
] | 7,009 | 2015-01-14T16:22:45.000Z | 2022-03-31T17:18:04.000Z | tests/gold_tests/tls_hooks/tls_hooks15.test.py | cmcfarlen/trafficserver | 2aa1d3106398eb082e5a454212b0273c63d5f69d | [
"Apache-2.0"
] | 901 | 2015-01-11T19:21:08.000Z | 2022-03-18T18:21:33.000Z | '''
Test one delayed preaccept callback
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
Test.Summary = '''
Test different combinations of TLS handshake hooks to ensure they are applied consistently.
'''
# Traffic Server under test (TLS enabled) and a TLS origin server.
ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True)
server = Test.MakeOriginServer("server", ssl=True)
request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
# desired response from the origin server
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
# Generate/install the default test certificates and enable ssl_hook_test debug tags.
ts.addDefaultSSLFiles()
ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1,
                               'proxy.config.diags.debug.tags': 'ssl_hook_test',
                               'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
                               'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
                               })
ts.Disk.ssl_multicert_config.AddLine(
    'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.remap_config.AddLine(
    'map https://example.com:{0} https://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port)
)
# Load the hook-test plugin: -close=2 -out_close=1 per the plugin's CLI options.
Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-close=2 -out_close=1')
tr = Test.AddTestRun("Test one delayed preaccept hook")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
# Drive one HTTPS request through ATS; hook ordering is checked via the debug log.
tr.Processes.Default.Command = 'curl -k -H \'host:example.com:{0}\' https://127.0.0.1:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
# Compare ATS stderr (debug output) against the expected hook sequence.
ts.Streams.stderr = "gold/ts-close-out-close.gold"
tr.Processes.Default.TimeOut = 15
tr.TimeOut = 15
| 42.968254 | 122 | 0.716661 |
# Autest script: one delayed TLS pre-accept hook via the ssl_hook_test plugin.
import os
Test.Summary = '''
Test different combinations of TLS handshake hooks to ensure they are applied consistently.
'''
# Traffic Server under test (TLS enabled) and a TLS origin server.
ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True)
server = Test.MakeOriginServer("server", ssl=True)
request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
# Desired response from the origin server.
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
ts.addDefaultSSLFiles()
ts.Disk.records_config.update({'proxy.config.diags.debug.enabled': 1,
                               'proxy.config.diags.debug.tags': 'ssl_hook_test',
                               'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
                               'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
                               })
ts.Disk.ssl_multicert_config.AddLine(
    'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.remap_config.AddLine(
    'map https://example.com:{0} https://127.0.0.1:{1}'.format(ts.Variables.ssl_port, server.Variables.SSL_Port)
)
# Load the hook-test plugin with its -close/-out_close options.
Test.PrepareTestPlugin(os.path.join(Test.Variables.AtsTestPluginsDir, 'ssl_hook_test.so'), ts, '-close=2 -out_close=1')
tr = Test.AddTestRun("Test one delayed preaccept hook")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = 'curl -k -H \'host:example.com:{0}\' https://127.0.0.1:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
# Compare ATS stderr (debug output) against the expected hook sequence.
ts.Streams.stderr = "gold/ts-close-out-close.gold"
tr.Processes.Default.TimeOut = 15
tr.TimeOut = 15
| true | true |
1c3cbd6c271b8033194c3ed634f6df820c824a31 | 1,223 | py | Python | project/expenses/migrations/0001_initial.py | MaciejChoromanski/parleto-recruitment-task | f6e459646feea776eba7d10fc17aa34ec32bd5c5 | [
"MIT"
] | null | null | null | project/expenses/migrations/0001_initial.py | MaciejChoromanski/parleto-recruitment-task | f6e459646feea776eba7d10fc17aa34ec32bd5c5 | [
"MIT"
] | null | null | null | project/expenses/migrations/0001_initial.py | MaciejChoromanski/parleto-recruitment-task | f6e459646feea776eba7d10fc17aa34ec32bd5c5 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-02-05 12:23
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the ``expenses`` app: Category and Expense tables.

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Expense',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('amount', models.DecimalField(decimal_places=2, max_digits=8)),
                ('date', models.DateField(db_index=True, default=datetime.date.today)),
                # PROTECT: a category cannot be deleted while expenses reference it.
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='expenses.Category')),
            ],
            options={
                'ordering': ('-date', '-pk'),  # newest expenses first
            },
        ),
    ]
| 33.054054 | 140 | 0.572363 |
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the ``expenses`` app: Category and Expense tables.

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='Expense',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('amount', models.DecimalField(decimal_places=2, max_digits=8)),
                ('date', models.DateField(db_index=True, default=datetime.date.today)),
                # PROTECT: a category cannot be deleted while expenses reference it.
                ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='expenses.Category')),
            ],
            options={
                'ordering': ('-date', '-pk'),  # newest expenses first
            },
        ),
    ]
| true | true |
1c3cbdcd5dfef12337865a12e26987f59af43ac2 | 591 | py | Python | 29.py | RafaelHuang87/Leet-Code-Practice | 7754dcee38ffda18a5759113ef06d7becf4fe728 | [
"MIT"
] | null | null | null | 29.py | RafaelHuang87/Leet-Code-Practice | 7754dcee38ffda18a5759113ef06d7becf4fe728 | [
"MIT"
] | null | null | null | 29.py | RafaelHuang87/Leet-Code-Practice | 7754dcee38ffda18a5759113ef06d7becf4fe728 | [
"MIT"
] | null | null | null | class Solution:
def divide(self, dividend: int, divisor: int) -> int:
if divisor == 0:
return 1 << 31 - 1
if dividend == 0:
return 0
i = 0
res = 0
p = abs(dividend)
q = abs(divisor)
while q << i <= p:
i = i + 1
for j in reversed(range(i)):
if q << j <= p:
p -= q << j
res += 1 << j
if (dividend > 0) != (divisor > 0) or res < -1 << 31:
res = -res
return min(res, 1 << 31 - 1)
s = Solution()
print((1 << 31) - 1) | 23.64 | 61 | 0.385787 | class Solution:
def divide(self, dividend: int, divisor: int) -> int:
if divisor == 0:
return 1 << 31 - 1
if dividend == 0:
return 0
i = 0
res = 0
p = abs(dividend)
q = abs(divisor)
while q << i <= p:
i = i + 1
for j in reversed(range(i)):
if q << j <= p:
p -= q << j
res += 1 << j
if (dividend > 0) != (divisor > 0) or res < -1 << 31:
res = -res
return min(res, 1 << 31 - 1)
s = Solution()
print((1 << 31) - 1) | true | true |
1c3cbdec1f47bbbdc448f2d53cb9050ff3f5baa2 | 13,573 | py | Python | gym_compete/policy.py | eunjilisa/CSE291DRL | 6b548673e1a974eb9448bb92d6fad9a1ca81bf3c | [
"MIT"
] | null | null | null | gym_compete/policy.py | eunjilisa/CSE291DRL | 6b548673e1a974eb9448bb92d6fad9a1ca81bf3c | [
"MIT"
] | null | null | null | gym_compete/policy.py | eunjilisa/CSE291DRL | 6b548673e1a974eb9448bb92d6fad9a1ca81bf3c | [
"MIT"
] | null | null | null | """Abstract policy class and some concrete implementations."""
from gym.spaces import Box
import numpy as np
from stable_baselines.common.tf_layers import ortho_init
from stable_baselines.common.tf_util import seq_to_batch
from stable_baselines.common.distributions import DiagGaussianProbabilityDistribution
from stable_baselines.common.policies import ActorCriticPolicy, RecurrentActorCriticPolicy, register_policy
import tensorflow as tf
class RunningMeanStd(object):
def __init__(self, scope="running", reuse=False, epsilon=1e-2, shape=()):
with tf.variable_scope(scope, reuse=reuse):
# We need these variables to be serialized/deserialized.
# Stable Baselines reasonably assumes only trainable variables need to be serialized.
# However, we do not want the optimizer to update these. In principle, we should
# update these based on observation history. However, Bansal et al's open-source code
# did not include support for this, and since they are unlikely to change much with
# additional training I have not added support for this.
# Hack: make them trainable, but use stop_gradients to stop them from being updated.
self._sum = tf.stop_gradient(tf.get_variable(
dtype=tf.float32,
shape=shape,
initializer=tf.constant_initializer(0.0),
name="sum", trainable=True))
self._sumsq = tf.stop_gradient(tf.get_variable(
dtype=tf.float32,
shape=shape,
initializer=tf.constant_initializer(epsilon),
name="sumsq", trainable=True))
self._count = tf.stop_gradient(tf.get_variable(
dtype=tf.float32,
shape=(),
initializer=tf.constant_initializer(epsilon),
name="count", trainable=True))
self.shape = shape
self.mean = tf.to_float(self._sum / self._count)
var_est = tf.to_float(self._sumsq / self._count) - tf.square(self.mean)
self.std = tf.sqrt(tf.maximum(var_est, 1e-2))
def dense(x, size, name, weight_init=None, bias=True):
w = tf.get_variable(name + "/w", [x.get_shape()[1], size], initializer=weight_init)
ret = tf.matmul(x, w)
if bias:
b = tf.get_variable(name + "/b", [size], initializer=tf.zeros_initializer())
return ret + b
else:
return ret
class GymCompetePolicy(ActorCriticPolicy):
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, hiddens=None,
state_shape=None, scope="input", reuse=False, normalize=False):
ActorCriticPolicy.__init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch,
reuse=reuse, scale=False)
self.hiddens = hiddens
self.normalized = normalize
self.weight_init = ortho_init(scale=0.01)
self.observation_space = ob_space
self.action_space = ac_space
with self.sess.graph.as_default():
with tf.variable_scope(scope, reuse=reuse):
self.scope = tf.get_variable_scope().name
assert isinstance(ob_space, Box)
if self.normalized:
if self.normalized != 'ob':
self.ret_rms = RunningMeanStd(scope="retfilter")
self.ob_rms = RunningMeanStd(shape=ob_space.shape, scope="obsfilter")
self.obz = self.processed_obs
if self.normalized:
self.obz = tf.clip_by_value((self.processed_obs - self.ob_rms.mean) / self.ob_rms.std, -5.0, 5.0)
def _setup_init(self):
pdparam = tf.concat([self.policy, self.policy * 0.0 + self.logstd], axis=1)
self._proba_distribution = DiagGaussianProbabilityDistribution(pdparam)
super()._setup_init()
def restore(self, params):
with self.sess.graph.as_default():
var_list = self.get_trainable_variables()
shapes = list(map(lambda x: x.get_shape().as_list(), var_list))
total_size = np.sum([int(np.prod(shape)) for shape in shapes])
theta = tf.placeholder(tf.float32, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = int(np.prod(shape))
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
op = tf.group(*assigns)
self.sess.run(op, {theta: params})
def get_trainable_variables(self):
return self.sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
class MlpPolicyValue(GymCompetePolicy):
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, hiddens=None,
scope="input", reuse=False, normalize=False):
if hiddens is None:
hiddens = [64, 64]
super().__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, hiddens=hiddens,
scope=scope, reuse=reuse, normalize=normalize)
self._initial_state = None
with self.sess.graph.as_default():
with tf.variable_scope(scope, reuse=reuse):
def dense_net(prefix, shape):
last_out = self.obz
ff_outs = []
for i, hid_size in enumerate(hiddens):
h = dense(last_out, hid_size, f'{prefix}{i + 1}',
weight_init=self.weight_init)
last_out = tf.nn.tanh(h)
ff_outs.append(last_out)
return dense(last_out, shape, f'{prefix}final',
weight_init=self.weight_init), ff_outs
self._value_fn, value_ff_acts = dense_net('vff', 1)
if self.normalized and self.normalized != 'ob':
self._value_fn = self._value_fn * self.ret_rms.std + self.ret_rms.mean # raw = not standardized
self._policy, policy_ff_acts = dense_net('pol', ac_space.shape[0])
self.ff_out = {'value': value_ff_acts, 'policy': policy_ff_acts}
self.logstd = tf.get_variable(name="logstd", shape=[1, ac_space.shape[0]],
initializer=tf.zeros_initializer())
self._setup_init()
def step(self, obs, state=None, mask=None, deterministic=False, extra_op=None):
action = self.deterministic_action if deterministic else self.action
outputs = [action, self.value_flat, self.neglogp]
if extra_op is not None:
outputs.append(extra_op)
a, v, neglogp, ex = self.sess.run(outputs, {self.obs_ph: obs})
return a, v, self.initial_state, neglogp, ex
else:
a, v, neglogp = self.sess.run(outputs, {self.obs_ph: obs})
return a, v, self.initial_state, neglogp
def proba_step(self, obs, state=None, mask=None):
return self.sess.run(self.policy_proba, {self.obs_ph: obs})
def value(self, obs, state=None, mask=None):
value = self.sess.run(self.value_flat, {self.obs_ph: obs})
return value
class LSTMPolicy(GymCompetePolicy, RecurrentActorCriticPolicy):
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, hiddens=None,
scope="input", reuse=False, normalize=False):
if hiddens is None:
hiddens = [128, 128]
num_lstm = hiddens[-1]
RecurrentActorCriticPolicy.__init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch,
state_shape=(4, num_lstm), reuse=reuse)
GymCompetePolicy.__init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch,
hiddens=hiddens, scope=scope, reuse=reuse, normalize=normalize)
with self.sess.graph.as_default():
with tf.variable_scope(scope, reuse=reuse):
self.state_out = []
states = tf.transpose(self.states_ph, (1, 0, 2))
def lstm(start, suffix):
# Feed forward
ff_out = self.obz
ff_list = []
for hidden in self.hiddens[:-1]:
ff_out = tf.contrib.layers.fully_connected(ff_out, hidden)
batch_ff_out = tf.reshape(ff_out, [self.n_env, n_steps, -1])
ff_list.append(batch_ff_out)
# Batch->Seq
input_seq = tf.reshape(ff_out, [self.n_env, n_steps, -1])
input_seq = tf.transpose(input_seq, (1, 0, 2))
masks = tf.reshape(self.dones_ph, [self.n_env, n_steps, 1])
# RNN
inputs_ta = tf.TensorArray(dtype=tf.float32, size=n_steps)
inputs_ta = inputs_ta.unstack(input_seq)
cell = tf.contrib.rnn.BasicLSTMCell(num_lstm, reuse=reuse)
initial_state = tf.contrib.rnn.LSTMStateTuple(states[start], states[start + 1])
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output
elements_finished = time >= n_steps
finished = tf.reduce_all(elements_finished)
# TODO: use masks
mask = tf.cond(finished,
lambda: tf.zeros([self.n_env, 1], dtype=tf.float32),
lambda: masks[:, time, :])
next_cell_state = cell_state or initial_state
next_cell_state = tf.contrib.rnn.LSTMStateTuple(next_cell_state.c * (1 - mask),
next_cell_state.h * (1 - mask))
next_input = tf.cond(
finished,
lambda: tf.zeros([self.n_env, ff_out.shape[-1]],
dtype=tf.float32),
lambda: inputs_ta.read(time))
next_loop_state = None
return (elements_finished, next_input, next_cell_state,
emit_output, next_loop_state)
outputs_ta, final_state, _ = tf.nn.raw_rnn(cell, loop_fn,
parallel_iterations=1,
scope=f'lstm{suffix}')
last_out = outputs_ta.stack()
last_out = seq_to_batch(last_out)
self.state_out.append(final_state)
return last_out, ff_list
value_out, value_ff_acts = lstm(0, 'v')
self._value_fn = tf.contrib.layers.fully_connected(value_out, 1, activation_fn=None)
if self.normalized and self.normalized != 'ob':
self._value_fn = self.value_fn * self.ret_rms.std + self.ret_rms.mean # raw = not standardized
mean, policy_ff_acts = lstm(2, 'p')
mean = tf.contrib.layers.fully_connected(mean, ac_space.shape[0],
activation_fn=None)
logstd = tf.get_variable(name="logstd", shape=[1, ac_space.shape[0]],
initializer=tf.zeros_initializer())
self.ff_out = {'value': value_ff_acts, 'policy': policy_ff_acts}
self._policy = tf.reshape(mean, [n_batch] + list(ac_space.shape))
self.logstd = tf.reshape(logstd, ac_space.shape)
zero_state = np.zeros((4, num_lstm), dtype=np.float32)
self._initial_state = np.tile(zero_state, (self.n_env, 1, 1))
for p in self.get_trainable_variables():
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.reduce_sum(tf.square(p)))
self._setup_init()
def _make_feed_dict(self, obs, state, mask):
return {
self.obs_ph: obs,
self.states_ph: state,
self.dones_ph: mask,
}
def step(self, obs, state=None, mask=None, deterministic=False, extra_op=None):
action = self.deterministic_action if deterministic else self.action
feed_dict = self._make_feed_dict(obs, state, mask)
outputs = [action, self.value_flat, self.state_out, self.neglogp]
if extra_op is not None:
outputs.append(extra_op)
a, v, s, neglogp, ex = self.sess.run(outputs, feed_dict)
else:
a, v, s, neglogp = self.sess.run(outputs, feed_dict)
state = []
for x in s:
state.append(x.c)
state.append(x.h)
state = np.array(state)
state = np.transpose(state, (1, 0, 2))
if extra_op is not None:
return a, v, state, neglogp, ex
else:
return a, v, state, neglogp
def proba_step(self, obs, state=None, mask=None):
return self.sess.run(self.policy_proba, self._make_feed_dict(obs, state, mask))
def value(self, obs, state=None, mask=None):
return self.sess.run(self.value_flat, self._make_feed_dict(obs, state, mask))
register_policy('BansalMlpPolicy', MlpPolicyValue)
register_policy('BansalLstmPolicy', LSTMPolicy)
| 47.458042 | 117 | 0.568997 |
from gym.spaces import Box
import numpy as np
from stable_baselines.common.tf_layers import ortho_init
from stable_baselines.common.tf_util import seq_to_batch
from stable_baselines.common.distributions import DiagGaussianProbabilityDistribution
from stable_baselines.common.policies import ActorCriticPolicy, RecurrentActorCriticPolicy, register_policy
import tensorflow as tf
class RunningMeanStd(object):
def __init__(self, scope="running", reuse=False, epsilon=1e-2, shape=()):
with tf.variable_scope(scope, reuse=reuse):
# did not include support for this, and since they are unlikely to change much with
# additional training I have not added support for this.
# Hack: make them trainable, but use stop_gradients to stop them from being updated.
self._sum = tf.stop_gradient(tf.get_variable(
dtype=tf.float32,
shape=shape,
initializer=tf.constant_initializer(0.0),
name="sum", trainable=True))
self._sumsq = tf.stop_gradient(tf.get_variable(
dtype=tf.float32,
shape=shape,
initializer=tf.constant_initializer(epsilon),
name="sumsq", trainable=True))
self._count = tf.stop_gradient(tf.get_variable(
dtype=tf.float32,
shape=(),
initializer=tf.constant_initializer(epsilon),
name="count", trainable=True))
self.shape = shape
self.mean = tf.to_float(self._sum / self._count)
var_est = tf.to_float(self._sumsq / self._count) - tf.square(self.mean)
self.std = tf.sqrt(tf.maximum(var_est, 1e-2))
def dense(x, size, name, weight_init=None, bias=True):
w = tf.get_variable(name + "/w", [x.get_shape()[1], size], initializer=weight_init)
ret = tf.matmul(x, w)
if bias:
b = tf.get_variable(name + "/b", [size], initializer=tf.zeros_initializer())
return ret + b
else:
return ret
class GymCompetePolicy(ActorCriticPolicy):
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, hiddens=None,
state_shape=None, scope="input", reuse=False, normalize=False):
ActorCriticPolicy.__init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch,
reuse=reuse, scale=False)
self.hiddens = hiddens
self.normalized = normalize
self.weight_init = ortho_init(scale=0.01)
self.observation_space = ob_space
self.action_space = ac_space
with self.sess.graph.as_default():
with tf.variable_scope(scope, reuse=reuse):
self.scope = tf.get_variable_scope().name
assert isinstance(ob_space, Box)
if self.normalized:
if self.normalized != 'ob':
self.ret_rms = RunningMeanStd(scope="retfilter")
self.ob_rms = RunningMeanStd(shape=ob_space.shape, scope="obsfilter")
self.obz = self.processed_obs
if self.normalized:
self.obz = tf.clip_by_value((self.processed_obs - self.ob_rms.mean) / self.ob_rms.std, -5.0, 5.0)
def _setup_init(self):
pdparam = tf.concat([self.policy, self.policy * 0.0 + self.logstd], axis=1)
self._proba_distribution = DiagGaussianProbabilityDistribution(pdparam)
super()._setup_init()
def restore(self, params):
with self.sess.graph.as_default():
var_list = self.get_trainable_variables()
shapes = list(map(lambda x: x.get_shape().as_list(), var_list))
total_size = np.sum([int(np.prod(shape)) for shape in shapes])
theta = tf.placeholder(tf.float32, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = int(np.prod(shape))
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
op = tf.group(*assigns)
self.sess.run(op, {theta: params})
def get_trainable_variables(self):
return self.sess.graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
class MlpPolicyValue(GymCompetePolicy):
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, hiddens=None,
scope="input", reuse=False, normalize=False):
if hiddens is None:
hiddens = [64, 64]
super().__init__(sess, ob_space, ac_space, n_env, n_steps, n_batch, hiddens=hiddens,
scope=scope, reuse=reuse, normalize=normalize)
self._initial_state = None
with self.sess.graph.as_default():
with tf.variable_scope(scope, reuse=reuse):
def dense_net(prefix, shape):
last_out = self.obz
ff_outs = []
for i, hid_size in enumerate(hiddens):
h = dense(last_out, hid_size, f'{prefix}{i + 1}',
weight_init=self.weight_init)
last_out = tf.nn.tanh(h)
ff_outs.append(last_out)
return dense(last_out, shape, f'{prefix}final',
weight_init=self.weight_init), ff_outs
self._value_fn, value_ff_acts = dense_net('vff', 1)
if self.normalized and self.normalized != 'ob':
self._value_fn = self._value_fn * self.ret_rms.std + self.ret_rms.mean # raw = not standardized
self._policy, policy_ff_acts = dense_net('pol', ac_space.shape[0])
self.ff_out = {'value': value_ff_acts, 'policy': policy_ff_acts}
self.logstd = tf.get_variable(name="logstd", shape=[1, ac_space.shape[0]],
initializer=tf.zeros_initializer())
self._setup_init()
def step(self, obs, state=None, mask=None, deterministic=False, extra_op=None):
action = self.deterministic_action if deterministic else self.action
outputs = [action, self.value_flat, self.neglogp]
if extra_op is not None:
outputs.append(extra_op)
a, v, neglogp, ex = self.sess.run(outputs, {self.obs_ph: obs})
return a, v, self.initial_state, neglogp, ex
else:
a, v, neglogp = self.sess.run(outputs, {self.obs_ph: obs})
return a, v, self.initial_state, neglogp
def proba_step(self, obs, state=None, mask=None):
return self.sess.run(self.policy_proba, {self.obs_ph: obs})
def value(self, obs, state=None, mask=None):
value = self.sess.run(self.value_flat, {self.obs_ph: obs})
return value
class LSTMPolicy(GymCompetePolicy, RecurrentActorCriticPolicy):
def __init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch, hiddens=None,
scope="input", reuse=False, normalize=False):
if hiddens is None:
hiddens = [128, 128]
num_lstm = hiddens[-1]
RecurrentActorCriticPolicy.__init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch,
state_shape=(4, num_lstm), reuse=reuse)
GymCompetePolicy.__init__(self, sess, ob_space, ac_space, n_env, n_steps, n_batch,
hiddens=hiddens, scope=scope, reuse=reuse, normalize=normalize)
with self.sess.graph.as_default():
with tf.variable_scope(scope, reuse=reuse):
self.state_out = []
states = tf.transpose(self.states_ph, (1, 0, 2))
def lstm(start, suffix):
# Feed forward
ff_out = self.obz
ff_list = []
for hidden in self.hiddens[:-1]:
ff_out = tf.contrib.layers.fully_connected(ff_out, hidden)
batch_ff_out = tf.reshape(ff_out, [self.n_env, n_steps, -1])
ff_list.append(batch_ff_out)
# Batch->Seq
input_seq = tf.reshape(ff_out, [self.n_env, n_steps, -1])
input_seq = tf.transpose(input_seq, (1, 0, 2))
masks = tf.reshape(self.dones_ph, [self.n_env, n_steps, 1])
# RNN
inputs_ta = tf.TensorArray(dtype=tf.float32, size=n_steps)
inputs_ta = inputs_ta.unstack(input_seq)
cell = tf.contrib.rnn.BasicLSTMCell(num_lstm, reuse=reuse)
initial_state = tf.contrib.rnn.LSTMStateTuple(states[start], states[start + 1])
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output
elements_finished = time >= n_steps
finished = tf.reduce_all(elements_finished)
# TODO: use masks
mask = tf.cond(finished,
lambda: tf.zeros([self.n_env, 1], dtype=tf.float32),
lambda: masks[:, time, :])
next_cell_state = cell_state or initial_state
next_cell_state = tf.contrib.rnn.LSTMStateTuple(next_cell_state.c * (1 - mask),
next_cell_state.h * (1 - mask))
next_input = tf.cond(
finished,
lambda: tf.zeros([self.n_env, ff_out.shape[-1]],
dtype=tf.float32),
lambda: inputs_ta.read(time))
next_loop_state = None
return (elements_finished, next_input, next_cell_state,
emit_output, next_loop_state)
outputs_ta, final_state, _ = tf.nn.raw_rnn(cell, loop_fn,
parallel_iterations=1,
scope=f'lstm{suffix}')
last_out = outputs_ta.stack()
last_out = seq_to_batch(last_out)
self.state_out.append(final_state)
return last_out, ff_list
value_out, value_ff_acts = lstm(0, 'v')
self._value_fn = tf.contrib.layers.fully_connected(value_out, 1, activation_fn=None)
if self.normalized and self.normalized != 'ob':
self._value_fn = self.value_fn * self.ret_rms.std + self.ret_rms.mean # raw = not standardized
mean, policy_ff_acts = lstm(2, 'p')
mean = tf.contrib.layers.fully_connected(mean, ac_space.shape[0],
activation_fn=None)
logstd = tf.get_variable(name="logstd", shape=[1, ac_space.shape[0]],
initializer=tf.zeros_initializer())
self.ff_out = {'value': value_ff_acts, 'policy': policy_ff_acts}
self._policy = tf.reshape(mean, [n_batch] + list(ac_space.shape))
self.logstd = tf.reshape(logstd, ac_space.shape)
zero_state = np.zeros((4, num_lstm), dtype=np.float32)
self._initial_state = np.tile(zero_state, (self.n_env, 1, 1))
for p in self.get_trainable_variables():
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, tf.reduce_sum(tf.square(p)))
self._setup_init()
def _make_feed_dict(self, obs, state, mask):
return {
self.obs_ph: obs,
self.states_ph: state,
self.dones_ph: mask,
}
def step(self, obs, state=None, mask=None, deterministic=False, extra_op=None):
action = self.deterministic_action if deterministic else self.action
feed_dict = self._make_feed_dict(obs, state, mask)
outputs = [action, self.value_flat, self.state_out, self.neglogp]
if extra_op is not None:
outputs.append(extra_op)
a, v, s, neglogp, ex = self.sess.run(outputs, feed_dict)
else:
a, v, s, neglogp = self.sess.run(outputs, feed_dict)
state = []
for x in s:
state.append(x.c)
state.append(x.h)
state = np.array(state)
state = np.transpose(state, (1, 0, 2))
if extra_op is not None:
return a, v, state, neglogp, ex
else:
return a, v, state, neglogp
def proba_step(self, obs, state=None, mask=None):
return self.sess.run(self.policy_proba, self._make_feed_dict(obs, state, mask))
def value(self, obs, state=None, mask=None):
return self.sess.run(self.value_flat, self._make_feed_dict(obs, state, mask))
register_policy('BansalMlpPolicy', MlpPolicyValue)
register_policy('BansalLstmPolicy', LSTMPolicy)
| true | true |
1c3cbe4e6cb755432845ea56bd4fb85e254cf1a6 | 5,608 | py | Python | tensorpack/predict/config.py | dan-anghel/tensorpack | 86fcffbc167e2b703b9abd17d41388311c90fe7c | [
"Apache-2.0"
] | null | null | null | tensorpack/predict/config.py | dan-anghel/tensorpack | 86fcffbc167e2b703b9abd17d41388311c90fe7c | [
"Apache-2.0"
] | null | null | null | tensorpack/predict/config.py | dan-anghel/tensorpack | 86fcffbc167e2b703b9abd17d41388311c90fe7c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# File: config.py
import six
from ..compat import tfv1 as tf
from ..train.model_desc import ModelDescBase
from ..tfutils import get_default_sess_config
from ..tfutils.sessinit import JustCurrentSession, SessionInit
from ..tfutils.sesscreate import NewSessionCreator
from ..tfutils.tower import TowerFunc
from ..utils import logger
from ..utils.develop import log_deprecated
__all__ = ['PredictConfig']
class PredictConfig(object):
def __init__(self,
model=None,
tower_func=None,
input_signature=None,
input_names=None,
output_names=None,
session_creator=None,
session_init=None,
return_input=False,
create_graph=True,
inputs_desc=None
):
"""
Users need to provide enough arguments to create a tower function,
which will be used to construct the graph.
This can be provided in the following ways:
1. `model`: a :class:`ModelDesc` instance. It will contain a tower function by itself.
2. `tower_func`: a :class:`tfutils.TowerFunc` instance.
Provide a tower function instance directly.
3. `tower_func`: a symbolic function and `input_signature`: the signature of the function.
Provide both a function and its signature.
Example:
.. code-block:: python
config = PredictConfig(model=my_model,
inputs_names=['image'],
output_names=['linear/output', 'prediction'])
Args:
model (ModelDescBase): to be used to construct a tower function.
tower_func: a callable which takes input tensors (by positional args) and construct a tower.
or a :class:`tfutils.TowerFunc` instance.
input_signature ([tf.TensorSpec]): if tower_func is a plain function (instead of a TowerFunc),
this describes the list of inputs it takes.
input_names (list): a list of input tensor names. Defaults to match input_signature.
The name can be either the name of a tensor, or the name of one input of the tower.
output_names (list): a list of names of the output tensors to predict, the
tensors can be any tensor in the graph that's computable from the tensors correponding to `input_names`.
session_creator (tf.train.SessionCreator): how to create the
session. Defaults to :class:`NewSessionCreator()`.
session_init (SessionInit): how to initialize variables of the session.
Defaults to do nothing.
return_input (bool): same as in :attr:`PredictorBase.return_input`.
create_graph (bool): create a new graph, or use the default graph
when predictor is first initialized.
inputs_desc (list[tf.TensorSpec]): old (deprecated) name for `input_signature`.
"""
def assert_type(v, tp, name):
assert isinstance(v, tp), \
"Argument '{}' has to be type '{}', but an object of type '{}' found.".format(
name, tp.__name__, v.__class__.__name__)
if inputs_desc is not None:
log_deprecated("PredictConfig(inputs_desc)", "Use input_signature instead!", "2020-03-01")
assert input_signature is None, "Cannot set both inputs_desc and input_signature!"
input_signature = inputs_desc
if model is not None:
assert_type(model, ModelDescBase, 'model')
assert input_signature is None and tower_func is None
self.input_signature = model.get_input_signature()
self.tower_func = TowerFunc(model.build_graph, self.input_signature)
else:
if isinstance(tower_func, TowerFunc):
input_signature = tower_func.input_signature
assert input_signature is not None and tower_func is not None
self.input_signature = input_signature
self.tower_func = TowerFunc(tower_func, input_signature)
if session_init is None:
session_init = JustCurrentSession()
self.session_init = session_init
assert_type(self.session_init, SessionInit, 'session_init')
if session_creator is None:
self.session_creator = NewSessionCreator(config=get_default_sess_config())
else:
self.session_creator = session_creator
# inputs & outputs
self.input_names = input_names
if self.input_names is None:
self.input_names = [k.name for k in self.input_signature]
assert output_names is not None, "Argument 'output_names' is not provided!"
self.output_names = output_names
assert_type(self.output_names, list, 'output_names')
assert_type(self.input_names, list, 'input_names')
if len(self.input_names) == 0:
logger.warn('PredictConfig receives empty "input_names".')
for v in self.input_names:
assert_type(v, six.string_types, 'Each item in input_names')
assert len(self.output_names), "Argument 'output_names' cannot be empty!"
self.return_input = bool(return_input)
self.create_graph = bool(create_graph)
self.inputs_desc = input_signature # TODO a little bit of compatibility
def _maybe_create_graph(self):
if self.create_graph:
return tf.Graph()
return tf.get_default_graph()
| 43.138462 | 120 | 0.635877 |
import six
from ..compat import tfv1 as tf
from ..train.model_desc import ModelDescBase
from ..tfutils import get_default_sess_config
from ..tfutils.sessinit import JustCurrentSession, SessionInit
from ..tfutils.sesscreate import NewSessionCreator
from ..tfutils.tower import TowerFunc
from ..utils import logger
from ..utils.develop import log_deprecated
__all__ = ['PredictConfig']
class PredictConfig(object):
def __init__(self,
model=None,
tower_func=None,
input_signature=None,
input_names=None,
output_names=None,
session_creator=None,
session_init=None,
return_input=False,
create_graph=True,
inputs_desc=None
):
def assert_type(v, tp, name):
assert isinstance(v, tp), \
"Argument '{}' has to be type '{}', but an object of type '{}' found.".format(
name, tp.__name__, v.__class__.__name__)
if inputs_desc is not None:
log_deprecated("PredictConfig(inputs_desc)", "Use input_signature instead!", "2020-03-01")
assert input_signature is None, "Cannot set both inputs_desc and input_signature!"
input_signature = inputs_desc
if model is not None:
assert_type(model, ModelDescBase, 'model')
assert input_signature is None and tower_func is None
self.input_signature = model.get_input_signature()
self.tower_func = TowerFunc(model.build_graph, self.input_signature)
else:
if isinstance(tower_func, TowerFunc):
input_signature = tower_func.input_signature
assert input_signature is not None and tower_func is not None
self.input_signature = input_signature
self.tower_func = TowerFunc(tower_func, input_signature)
if session_init is None:
session_init = JustCurrentSession()
self.session_init = session_init
assert_type(self.session_init, SessionInit, 'session_init')
if session_creator is None:
self.session_creator = NewSessionCreator(config=get_default_sess_config())
else:
self.session_creator = session_creator
self.input_names = input_names
if self.input_names is None:
self.input_names = [k.name for k in self.input_signature]
assert output_names is not None, "Argument 'output_names' is not provided!"
self.output_names = output_names
assert_type(self.output_names, list, 'output_names')
assert_type(self.input_names, list, 'input_names')
if len(self.input_names) == 0:
logger.warn('PredictConfig receives empty "input_names".')
for v in self.input_names:
assert_type(v, six.string_types, 'Each item in input_names')
assert len(self.output_names), "Argument 'output_names' cannot be empty!"
self.return_input = bool(return_input)
self.create_graph = bool(create_graph)
self.inputs_desc = input_signature
def _maybe_create_graph(self):
if self.create_graph:
return tf.Graph()
return tf.get_default_graph()
| true | true |
1c3cbff4ab1d40397a40289bd608c833483a7609 | 8,523 | py | Python | datalad_neuroimaging/extractors/bids.py | mslw/datalad-neuroimaging | d04807c41a8124cf3e7ff81ba8be7969a64fe7b6 | [
"MIT"
] | 14 | 2018-04-01T15:33:31.000Z | 2022-02-14T04:10:23.000Z | datalad_neuroimaging/extractors/bids.py | mslw/datalad-neuroimaging | d04807c41a8124cf3e7ff81ba8be7969a64fe7b6 | [
"MIT"
] | 98 | 2018-03-29T14:15:40.000Z | 2022-03-15T10:49:35.000Z | datalad_neuroimaging/extractors/bids.py | mslw/datalad-neuroimaging | d04807c41a8124cf3e7ff81ba8be7969a64fe7b6 | [
"MIT"
] | 10 | 2018-04-09T10:49:32.000Z | 2022-02-08T13:08:36.000Z | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""BIDS metadata extractor (http://bids.neuroimaging.io)"""
from __future__ import absolute_import
from math import isnan
# use pybids to evolve with the standard without having to track it too much
import bids
from bids import BIDSLayout
import re
from io import open
from os.path import join as opj
from os.path import exists
from os.path import curdir
from datalad.dochelpers import exc_str
from datalad.metadata.extractors.base import BaseMetadataExtractor
from datalad.metadata.definitions import vocabulary_id
from datalad.utils import assure_unicode
from datalad.support.external_versions import external_versions
from datalad import cfg
import logging
lgr = logging.getLogger('datalad.metadata.extractors.bids')
from datalad.log import log_progress
vocabulary = {
# characteristics (metadata keys)
"age(years)": {
'@id': "pato:0000011",
'unit': "uo:0000036",
'unit_label': "year",
'description': "age of a sample (organism) at the time of data acquisition in years"},
}
content_metakey_map = {
# go with plain 'id' as BIDS has this built-in conflict of subject/participant
# for the same concept
'participant_id': 'id',
'age': 'age(years)',
}
sex_label_map = {
'f': 'female',
'm': 'male',
}
class MetadataExtractor(BaseMetadataExtractor):
    """Metadata extractor for BIDS datasets, backed by pybids' BIDSLayout.

    ``get_metadata()`` yields dataset-level metadata (derived from
    ``dataset_description.json``, optionally augmented with a README) and,
    on request, per-file ("content") metadata queried from pybids.
    """
    # name of the standard BIDS dataset-level description file
    _dsdescr_fname = 'dataset_description.json'
    # map BIDS dataset_description.json keys onto datalad's standard keys
    _key2stdkey = {
        'Name': 'name',
        'License': 'license',
        'Authors': 'author',
        'ReferencesAndLinks': 'citation',
        'Funding': 'fundedby',
        'Description': 'description',
    }
    def get_metadata(self, dataset, content):
        """Return (dataset_metadata, content_metadata) for this dataset.

        When ``content`` is falsy, per-file extraction is skipped and an
        empty list is returned in its place.
        """
        # only enable pybids derivative handling if a 'derivatives/' dir exists
        derivative_exist = exists(opj(self.ds.path, 'derivatives'))
        # NOTE: this local name shadows the module-level ``bids`` import
        bids = BIDSLayout(self.ds.path, derivatives=derivative_exist)
        dsmeta = self._get_dsmeta(bids)
        if not content:
            return dsmeta, []
        return dsmeta, self._get_cnmeta(bids)
    def _get_dsmeta(self, bids):
        """Assemble dataset-level metadata from the given BIDS layout."""
        context = {}
        if hasattr(bids, 'get_dataset_description'):
            # post 0.9.1
            # https://github.com/bids-standard/pybids/pull/444
            dsdesc_dict = bids.get_dataset_description()
        else:
            dsdesc_dict = bids.get_metadata(
                opj(self.ds.path, self._dsdescr_fname)
            )
        # translate known keys, pass unknown ones through untouched
        meta = {
            self._key2stdkey.get(k, k): v
            for k, v in dsdesc_dict.items()
        }
        # TODO maybe normalize labels of standard licenses to definition URIs
        # perform mapping
        README_fname = opj(self.ds.path, 'README')
        if not meta.get('description') and exists(README_fname):
            # BIDS uses README to provide description, so if was not
            # explicitly provided to possibly override longer README, let's just
            # load README
            with open(README_fname, 'rb') as f:
                desc = assure_unicode(f.read())
            meta['description'] = desc.strip()
        # special case
        # Could be None which we can't strip so or ''
        bids_version = (meta.get('BIDSVersion', '') or '').strip()
        bids_defurl = 'http://bids.neuroimaging.io'
        if bids_version:
            bids_defurl += '/bids_spec{}.pdf'.format(bids_version)
        meta['conformsto'] = bids_defurl
        context['bids'] = {
            # not really a working URL, but BIDS doesn't provide term defs in
            # any accessible way
            '@id': '{}#'.format(bids_defurl),
            'description': 'ad-hoc vocabulary for the Brain Imaging Data Structure (BIDS) standard',
            'type': vocabulary_id,
        }
        context.update(vocabulary)
        meta['@context'] = context
        return meta
    def _get_cnmeta(self, bids):
        """Generator of (relative_path, metadata_dict) for dataset files."""
        # TODO any custom handling of participants infos should eventually
        # be done by pybids in one way or another
        path_props = {}
        participants_fname = opj(self.ds.path, 'participants.tsv')
        if exists(participants_fname):
            try:
                for rx, info in yield_participant_info(bids):
                    path_props[rx] = {'subject': info}
            except Exception as exc:
                # a malformed participants.tsv must not abort extraction,
                # but genuine import problems should surface
                if isinstance(exc, ImportError):
                    raise exc
                lgr.warning(
                    "Failed to load participants info due to: %s. Skipping the rest of file",
                    exc_str(exc)
                )
        log_progress(
            lgr.info,
            'extractorbids',
            'Start BIDS metadata extraction from %s', self.ds,
            total=len(self.paths),
            label='BIDS metadata extraction',
            unit=' Files',
        )
        # now go over all files in the dataset and query pybids for its take
        # on each of them
        for f in self.paths:
            absfp = opj(self.ds.path, f)
            log_progress(
                lgr.info,
                'extractorbids',
                'Extract BIDS metadata from %s', absfp,
                update=1,
                increment=True)
            # BIDS carries a substantial portion of its metadata in JSON
            # sidecar files. we ignore them here completely
            # this might yield some false-negatives in theory, but
            # this case has not been observed in practice yet, hence
            # doing it cheap for now
            if f.endswith('.json'):
                continue
            md = {}
            try:
                md.update(
                    {k: v
                     for k, v in bids.get_metadata(
                         opj(self.ds.path, f),
                         include_entities=True).items()
                     # no nested structures for now (can be monstrous when DICOM
                     # metadata is embedded)
                     if not isinstance(v, dict)})
            except ValueError as e:
                lgr.debug(
                    'PyBIDS errored on file %s in %s: %s '
                    '(possibly not BIDS-compliant or not recognized',
                    f, self.ds, exc_str(e))
                lgr.debug('no usable BIDS metadata for %s in %s: %s',
                          f, self.ds, exc_str(e))
                # do not raise here:
                # https://github.com/datalad/datalad-neuroimaging/issues/34
            except Exception as e:
                lgr.debug('no usable BIDS metadata for %s in %s: %s',
                          f, self.ds, exc_str(e))
                if cfg.get('datalad.runtime.raiseonerror'):
                    raise
            # now check all props from other sources and apply them
            for rx in path_props:
                if rx.match(f):
                    md.update(path_props[rx])
            yield f, md
        log_progress(
            lgr.info,
            'extractorbids',
            'Finished BIDS metadata extraction from %s', self.ds
        )
def yield_participant_info(bids):
    """Yield (path_regex, properties) for every participant in the layout.

    The regex matches any path below the participant's ``sub-<id>/``
    directory; the properties dict always carries the participant ``id``
    plus any normalized dataset-level variable columns.
    """
    collection = bids.get_collections(level='dataset')[0]
    for record in collection.to_df().to_dict(orient='records'):
        # the participant identifier is treated separately from other columns
        props = {'id': assure_unicode(record.pop('subject'))}
        for column, raw in record.items():
            normalized = assure_unicode(column).lower()
            key = content_metakey_map.get(normalized, normalized)
            value = assure_unicode(raw)
            if key in ('sex', 'gender'):
                if hasattr(value, 'lower'):
                    value = value.lower()
                elif isinstance(value, float) and isnan(value):
                    # pybids reports 'n/a' as NaN
                    value = 'n/a'
                value = sex_label_map.get(value, value)
            if key == 'suffix' and value == 'participants':
                # regression in PyBIDS 0.7.1, should be fixed in 0.8
                # https://github.com/bids-standard/pybids/issues/380
                # TODO: remove workaround whenever we depend on pybids >= 0.8
                continue
            if value:
                # empty/falsy values are dropped entirely
                props[key] = value
        if props:
            yield re.compile(r'^sub-{}/.*'.format(props['id'])), props
| 36.896104 | 100 | 0.551801 |
es. we ignore them here completely
# this might yield some false-negatives in theory, but
# this case has not been observed in practice yet, hence
# doing it cheap for now
if f.endswith('.json'):
continue
md = {}
try:
md.update(
{k: v
for k, v in bids.get_metadata(
opj(self.ds.path, f),
include_entities=True).items()
# no nested structures for now (can be monstrous when DICOM
# metadata is embedded)
if not isinstance(v, dict)})
except ValueError as e:
lgr.debug(
'PyBIDS errored on file %s in %s: %s '
'(possibly not BIDS-compliant or not recognized',
f, self.ds, exc_str(e))
lgr.debug('no usable BIDS metadata for %s in %s: %s',
f, self.ds, exc_str(e))
# do not raise here:
# https://github.com/datalad/datalad-neuroimaging/issues/34
except Exception as e:
lgr.debug('no usable BIDS metadata for %s in %s: %s',
f, self.ds, exc_str(e))
if cfg.get('datalad.runtime.raiseonerror'):
raise
# no check al props from other sources and apply them
for rx in path_props:
if rx.match(f):
md.update(path_props[rx])
yield f, md
log_progress(
lgr.info,
'extractorbids',
'Finished BIDS metadata extraction from %s', self.ds
)
def yield_participant_info(bids):
for bidsvars in bids.get_collections(
level='dataset')[0].to_df().to_dict(orient='records'):
props = dict(id=assure_unicode(bidsvars.pop('subject')))
for p in bidsvars:
# take away some ambiguity
normk = assure_unicode(p).lower()
hk = content_metakey_map.get(normk, normk)
val = assure_unicode(bidsvars[p])
if hk in ('sex', 'gender'):
if hasattr(val, 'lower'):
val = val.lower()
elif isinstance(val, float) and isnan(val):
# pybids reports 'n/a' is NaN
val = 'n/a'
val = sex_label_map.get(val, val)
if hk == 'suffix' and val == 'participants':
# regression in PyBIDS 0.7.1, should be fixed in 0.8
# https://github.com/bids-standard/pybids/issues/380
# TODO: remove workaround whenever we depend on pybids >= 0.8
# after verifying that it is not succeptable
continue
if val:
props[hk] = val
if props:
yield re.compile(r'^sub-{}/.*'.format(props['id'])), props
| true | true |
1c3cc00238440b522a77df3b0048d924f52beba3 | 2,493 | py | Python | 201005/students_stat.py | EvgenDEP1/python-basics | 5afee7422bf25ba9a310d4bc2cf3c90c506b2018 | [
"MIT"
] | null | null | null | 201005/students_stat.py | EvgenDEP1/python-basics | 5afee7422bf25ba9a310d4bc2cf3c90c506b2018 | [
"MIT"
] | null | null | null | 201005/students_stat.py | EvgenDEP1/python-basics | 5afee7422bf25ba9a310d4bc2cf3c90c506b2018 | [
"MIT"
] | null | null | null | import json
def parse_marks(f_name):
    """Parse a marks file into a list of student records.

    Each non-empty input line must look like::

        <last_name> <first_name> <patronymic>, <mark>, <mark>, ...

    Parameters
    ----------
    f_name : str
        Path to a UTF-8 encoded input file.

    Returns
    -------
    list
        One ``[last_name, first_name, patronymic, marks, avg_mark]`` entry
        per input line; ``marks`` is a list of ints, ``avg_mark`` their
        arithmetic mean (float).
    """
    result = []
    with open(f_name, 'r', encoding='utf-8') as f:
        # iterate lazily instead of reading the whole file into memory
        for row in f:
            row = row.strip()
            if not row:
                # tolerate blank lines instead of crashing on unpacking
                continue
            last_name, first_name, patronymic, row_marks = row.split(maxsplit=3)
            patronymic = patronymic.strip(',')
            # int() ignores surrounding whitespace, so no per-token strip needed
            marks = [int(mark) for mark in row_marks.split(',')]
            avg_mark = sum(marks) / len(marks)
            result.append([last_name, first_name, patronymic, marks, avg_mark])
    return result
def parse_marks_as_dict(f_name):
    """Parse a marks file into a list of per-student dicts.

    Same input format as :func:`parse_marks`, but each record is returned
    as a dict with the keys ``last_name``, ``first_name``, ``patronymic``,
    ``marks`` (list of ints) and ``avg_mark`` (float).

    Parameters
    ----------
    f_name : str
        Path to a UTF-8 encoded input file.
    """
    result = []
    with open(f_name, 'r', encoding='utf-8') as f:
        for row in f:
            row = row.strip()
            if not row:
                # tolerate blank lines instead of crashing on unpacking
                continue
            last_name, first_name, patronymic, row_marks = row.split(maxsplit=3)
            patronymic = patronymic.strip(',')
            marks = [int(mark) for mark in row_marks.split(',')]
            result.append(
                {
                    'last_name': last_name,
                    'first_name': first_name,
                    'patronymic': patronymic,
                    'marks': marks,
                    'avg_mark': sum(marks) / len(marks),
                }
            )
    return result
def show_marks(parsed_marks, raw=True, sep=' '):
    """Print one student record per line.

    With ``raw=True`` the record is printed as-is (its ``str()`` form);
    otherwise its fields are joined with *sep*.
    """
    for record in parsed_marks:
        line = str(record) if raw else sep.join(str(field) for field in record)
        print(line)
def show_students(parsed_marks):
    """Print '<last_name> <first_name> <patronymic>' for every record."""
    for last_name, first_name, patronymic, *_ in parsed_marks:
        print(last_name, first_name, patronymic)
def show_students_dict(parsed_marks_as_dict):
    """Print '<first_name> <last_name> <patronymic>' for every record dict.

    NOTE: given name comes first here, unlike show_students().
    """
    for record in parsed_marks_as_dict:
        print(record['first_name'], record['last_name'], record['patronymic'])
def save_marks(f_name, parsed_marks):
    """Write a header line plus one comma-separated line per record.

    Each record's fields are rendered with ``str()``, so the marks list
    appears in its Python literal form (e.g. ``[5, 4]``).
    """
    header = ('last_name', 'first_name', 'patronymic', 'marks', 'avg_mark')
    with open(f_name, 'w', encoding='utf-8') as out:
        out.write(', '.join(header) + '\n')
        for record in parsed_marks:
            out.write(', '.join(str(field) for field in record) + '\n')
def save_marks_as_dict(f_name, parsed_marks_as_dict):
    """Serialize the list of record dicts to *f_name* as JSON."""
    with open(f_name, 'w', encoding='utf-8') as out:
        json.dump(parsed_marks_as_dict, out)
| 30.036145 | 80 | 0.515844 | import json
def parse_marks(f_name):
result = []
with open(f_name, 'r', encoding='utf-8') as f:
for row in f.read().splitlines():
last_name, first_name, patronymic, row_marks = row.split(maxsplit=3)
patronymic = patronymic.strip(',')
marks = []
for mark in row_marks.split(','):
marks.append(int(mark.strip()))
avg_mark = sum(marks) / len(marks)
result.append([last_name, first_name, patronymic, marks, avg_mark])
return result
def parse_marks_as_dict(f_name):
result = []
with open(f_name, 'r', encoding='utf-8') as f:
for row in f.read().splitlines():
last_name, first_name, patronymic, row_marks = row.split(maxsplit=3)
patronymic = patronymic.strip(',')
marks = []
for mark in row_marks.split(','):
marks.append(int(mark.strip()))
avg_mark = sum(marks) / len(marks)
result.append(
{
'last_name': last_name,
'first_name': first_name,
'patronymic': patronymic,
'marks': marks,
'avg_mark': avg_mark
}
)
return result
def show_marks(parsed_marks, raw=True, sep=' '):
for row in parsed_marks:
if raw:
print(row)
else:
print(sep.join(map(str, row)))
def show_students(parsed_marks):
for row in parsed_marks:
print(row[0], row[1], row[2])
def show_students_dict(parsed_marks_as_dict):
for row in parsed_marks_as_dict:
print(row['first_name'], row['last_name'], row['patronymic'])
def save_marks(f_name, parsed_marks):
head = ['last_name', 'first_name', 'patronymic', 'marks', 'avg_mark']
with open(f_name, 'w', encoding='utf-8') as f:
f.write(', '.join(head))
f.write('\n')
for row in parsed_marks:
f.write(', '.join(map(str, row)))
f.write('\n')
def save_marks_as_dict(f_name, parsed_marks_as_dict):
with open(f_name, 'w', encoding='utf-8') as f:
json.dump(parsed_marks_as_dict, f)
| true | true |
1c3cc016257a366db21459645375b0508521f0af | 2,905 | py | Python | sfaira/data/dataloaders/loaders/d10_1101_753806/human_lungparenchyma_2020_10xsequencing_habermann_001.py | johnmous/sfaira | c50240a74530e614ab7681bf9c63b04cb815b361 | [
"BSD-3-Clause"
] | null | null | null | sfaira/data/dataloaders/loaders/d10_1101_753806/human_lungparenchyma_2020_10xsequencing_habermann_001.py | johnmous/sfaira | c50240a74530e614ab7681bf9c63b04cb815b361 | [
"BSD-3-Clause"
] | null | null | null | sfaira/data/dataloaders/loaders/d10_1101_753806/human_lungparenchyma_2020_10xsequencing_habermann_001.py | johnmous/sfaira | c50240a74530e614ab7681bf9c63b04cb815b361 | [
"BSD-3-Clause"
] | null | null | null | import anndata
import os
import pandas as pd
from sfaira.data import DatasetBase
class Dataset(DatasetBase):
    """sfaira loader configuration for GSE135893 (Habermann et al. 2020),
    human lung parenchyma scRNA-seq.

    TODO extra meta data in obs2
    age: columns "Age" contains integer entries and Unknown
    diseases: column "Diagnosis" contains entries NSIP, cHP, Control, IPF, ILD, Sarcoidosis
             column Tobacco contains entries Y,N
    ethnicity: column "Ethnicity" contains entries African_American, Caucasian, Hispanic, Unknown
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # raw count matrix plus gene/barcode annotations from GEO
        self.download_url_data = [
            "https://ftp.ncbi.nlm.nih.gov/geo/series/GSE135nnn/GSE135893/suppl/GSE135893%5Fmatrix%2Emtx%2Egz",
            "https://ftp.ncbi.nlm.nih.gov/geo/series/GSE135nnn/GSE135893/suppl/GSE135893%5Fgenes%2Etsv%2Egz",
            "https://ftp.ncbi.nlm.nih.gov/geo/series/GSE135nnn/GSE135893/suppl/GSE135893%5Fbarcodes%2Etsv%2Egz"
        ]
        # cell-level metadata (GEO) and the journal's supplementary table S2
        self.download_url_meta = [
            "https://ftp.ncbi.nlm.nih.gov/geo/series/GSE135nnn/GSE135893/suppl/GSE135893%5FIPF%5Fmetadata%2Ecsv%2Egz",
            "https://advances.sciencemag.org/highwire/filestream/234522/field_highwire_adjunct_files/2/aba1972_Table_S2.csv",
        ]
        self.author = "Habermann"
        self.doi_journal = "10.1126/sciadv.aba1972"
        self.doi_preprint = "10.1101/753806"
        # counts live directly in adata.X (see load() below)
        self.layer_counts = "X"
        self.organ = "lung parenchyma"
        self.organism = "Homo sapiens"
        self.primary_data = True
        # per-cell assay is taken from the obs column built in load()
        self.assay_sc_obs_key = "Chemistry"
        self.year = 2020
        self.sample_source = "primary_tissue"
        self.sex_obs_key = "Gender"
        self.tech_sample_obs_key = "Sample_Name"
        self.feature_symbol_var_key = "index"
        self.feature_type = "rna"
        self.cell_type_obs_key = "celltype"
        self.state_exact_obs_key = "Diagnosis"
        self.set_dataset_id(idx=1)
def load(data_dir, **kwargs):
    """Assemble an AnnData object for GSE135893 from downloaded files.

    Parameters
    ----------
    data_dir : str
        Directory holding the GEO matrix/genes/barcodes files, the
        cell-level metadata CSV and the donor-level table S2 CSV.

    Returns
    -------
    anndata.AnnData
        Cells x genes matrix restricted to cells present in the metadata,
        with per-cell annotations (including mapped Chemistry and Gender)
        attached as ``obs``.
    """
    fn = [
        os.path.join(data_dir, "GSE135893_matrix.mtx.gz"),
        os.path.join(data_dir, "GSE135893_genes.tsv.gz"),
        os.path.join(data_dir, "GSE135893_barcodes.tsv.gz"),
        os.path.join(data_dir, "GSE135893_IPF_metadata.csv.gz"),
        os.path.join(data_dir, "aba1972_Table_S2.csv"),
    ]
    # the mtx is stored genes x cells; transpose to cells x genes
    adata = anndata.read_mtx(fn[0]).T
    adata.var = pd.read_csv(fn[1], index_col=0, header=None, names=["ids"])
    adata.obs = pd.read_csv(fn[2], index_col=0, header=None, names=["barcodes"])
    obs = pd.read_csv(fn[3], index_col=0)
    obs2 = pd.read_csv(fn[4], index_col=0)
    # obs2 is indexed by sample id; broadcast sample-level fields to cells
    # via each cell's 'orig.ident', normalizing values to sfaira vocabulary
    obs["Chemistry"] = [{"3_prime_V2": "10x 3' v2", "5_prime": "10x 5' v1"}[obs2.loc[x, "Chemistry"]]
                        for x in obs["orig.ident"].values]
    obs["Gender"] = [{"F": "female", "M": "male", "Unknown": "unknown"}[obs2.loc[x, "Gender"]]
                     for x in obs["orig.ident"].values]
    # keep only cells covered by the metadata table, then attach it
    adata = adata[obs.index.tolist(), :].copy()
    adata.obs = obs
    return adata
| 39.256757 | 125 | 0.64475 | import anndata
import os
import pandas as pd
from sfaira.data import DatasetBase
class Dataset(DatasetBase):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.download_url_data = [
"https://ftp.ncbi.nlm.nih.gov/geo/series/GSE135nnn/GSE135893/suppl/GSE135893%5Fmatrix%2Emtx%2Egz",
"https://ftp.ncbi.nlm.nih.gov/geo/series/GSE135nnn/GSE135893/suppl/GSE135893%5Fgenes%2Etsv%2Egz",
"https://ftp.ncbi.nlm.nih.gov/geo/series/GSE135nnn/GSE135893/suppl/GSE135893%5Fbarcodes%2Etsv%2Egz"
]
self.download_url_meta = [
"https://ftp.ncbi.nlm.nih.gov/geo/series/GSE135nnn/GSE135893/suppl/GSE135893%5FIPF%5Fmetadata%2Ecsv%2Egz",
"https://advances.sciencemag.org/highwire/filestream/234522/field_highwire_adjunct_files/2/aba1972_Table_S2.csv",
]
self.author = "Habermann"
self.doi_journal = "10.1126/sciadv.aba1972"
self.doi_preprint = "10.1101/753806"
self.layer_counts = "X"
self.organ = "lung parenchyma"
self.organism = "Homo sapiens"
self.primary_data = True
self.assay_sc_obs_key = "Chemistry"
self.year = 2020
self.sample_source = "primary_tissue"
self.sex_obs_key = "Gender"
self.tech_sample_obs_key = "Sample_Name"
self.feature_symbol_var_key = "index"
self.feature_type = "rna"
self.cell_type_obs_key = "celltype"
self.state_exact_obs_key = "Diagnosis"
self.set_dataset_id(idx=1)
def load(data_dir, **kwargs):
fn = [
os.path.join(data_dir, "GSE135893_matrix.mtx.gz"),
os.path.join(data_dir, "GSE135893_genes.tsv.gz"),
os.path.join(data_dir, "GSE135893_barcodes.tsv.gz"),
os.path.join(data_dir, "GSE135893_IPF_metadata.csv.gz"),
os.path.join(data_dir, "aba1972_Table_S2.csv"),
]
adata = anndata.read_mtx(fn[0]).T
adata.var = pd.read_csv(fn[1], index_col=0, header=None, names=["ids"])
adata.obs = pd.read_csv(fn[2], index_col=0, header=None, names=["barcodes"])
obs = pd.read_csv(fn[3], index_col=0)
obs2 = pd.read_csv(fn[4], index_col=0)
obs["Chemistry"] = [{"3_prime_V2": "10x 3' v2", "5_prime": "10x 5' v1"}[obs2.loc[x, "Chemistry"]]
for x in obs["orig.ident"].values]
obs["Gender"] = [{"F": "female", "M": "male", "Unknown": "unknown"}[obs2.loc[x, "Gender"]]
for x in obs["orig.ident"].values]
adata = adata[obs.index.tolist(), :].copy()
adata.obs = obs
return adata
| true | true |
1c3cc03dfad022d92941d960e408e42ce7dbf4d1 | 45 | py | Python | Game/python/python_tests.py | TimothyThompkins/InteractiveGame | 06042a217ede1239b4a3dd8e5adaa5e28ef7095f | [
"MIT"
] | null | null | null | Game/python/python_tests.py | TimothyThompkins/InteractiveGame | 06042a217ede1239b4a3dd8e5adaa5e28ef7095f | [
"MIT"
] | null | null | null | Game/python/python_tests.py | TimothyThompkins/InteractiveGame | 06042a217ede1239b4a3dd8e5adaa5e28ef7095f | [
"MIT"
] | null | null | null | elements = bytes([255])
print (elements[0])
| 11.25 | 23 | 0.666667 | elements = bytes([255])
print (elements[0])
| true | true |
1c3cc04ee433c89a91e2b709aabbd8eff08d03bc | 2,391 | py | Python | tests/covariance/test_empirical.py | OVVO-Financial/precise | ce744cadfca18f4ab77c68cc27bf8d712561127f | [
"MIT"
] | null | null | null | tests/covariance/test_empirical.py | OVVO-Financial/precise | ce744cadfca18f4ab77c68cc27bf8d712561127f | [
"MIT"
] | null | null | null | tests/covariance/test_empirical.py | OVVO-Financial/precise | ce744cadfca18f4ab77c68cc27bf8d712561127f | [
"MIT"
] | null | null | null |
import numpy as np
from precise.skaters.covariance.runempfactory import emp_pcov, merge_emp_scov
from precise.skatertools.syntheticdata.miscellaneous import create_correlated_dataset
from precise.skaters.covarianceutil.covfunctions import cov_to_corrcoef
from precise.skaters.covarianceutil.datacovfunctions import pcov_of_columns
# Some cut and paste https://carstenschelp.github.io/2019/05/12/Online_Covariance_Algorithm_002.html
# However I've removed the confusion between sample and population estimates, and taken the tolerance
# down to 1e-10
# absolute tolerance for comparing incremental estimates against numpy
TOL = 1E-10
def test_onlineempirical():
    """Running empirical covariance must match numpy's batch estimates."""
    data = create_correlated_dataset(100, (2.2, 4.4, 1.5), np.array([[0.2, 0.5, 0.7],[0.3, 0.2, 0.2],[0.5,0.3,0.1]]), (1, 5, 3))
    # NOTE(review): this value is recomputed inside the loop before use;
    # the assignment here appears to be dead code
    np_corrcoef = np.corrcoef(data, rowvar=False)
    s = {}
    # NOTE(review): only the first two samples are fed in (data[:2]) --
    # possibly a leftover truncation; consider iterating all of `data`
    for j,x in enumerate(data[:2]):
        s = emp_pcov(s=s, x=x, k=1)
        if j>=1:
            # batch references over the samples seen so far
            np_mean = np.mean(data[:j+1,:],axis=0)
            np_pcov = np.cov(data[:j+1,:], rowvar=False, bias=True)
            np_pcov2 = pcov_of_columns(data[:j + 1, :])
            np_corrcoef = np.corrcoef(data[:j+1,:], rowvar=False)
            ocorr = cov_to_corrcoef(s['pcov'])
            assert np.isclose(np_pcov, s['pcov'], atol=TOL).all()
            assert np.isclose(np_pcov2, s['pcov'], atol=TOL).all()
            assert np.isclose(np_mean, s['mean'], atol=TOL).all()
            assert np.isclose(np_corrcoef, ocorr, atol=TOL).all()
def test_merging():
    """Merging two independently accumulated states must equal one combined run."""
    data_part1 = create_correlated_dataset(500, (2.2, 4.4, 1.5), np.array([[0.2, 0.5, 0.7], [0.3, 0.2, 0.2], [0.5, 0.3, 0.1]]), (1, 5, 3))
    data_part2 = create_correlated_dataset( \
        1000, (5, 6, 2), np.array([[0.2, 0.5, 0.7], [0.3, 0.2, 0.2], [0.5, 0.3, 0.1]]), (1, 5, 3))
    ocov_part1 = {}
    ocov_part2 = {}
    ocov_both = {}
    # feed part 1 into its own accumulator and into the combined one
    for row in data_part1:
        ocov_part1 = emp_pcov(s=ocov_part1, x=row)
        ocov_both = emp_pcov(s=ocov_both, x=row)
    # likewise for part 2
    for row in data_part2:
        ocov_part2 = emp_pcov(s=ocov_part2, x=row)
        ocov_both = emp_pcov(s=ocov_both, x=row)
    ocov_merged = merge_emp_scov(s=ocov_part1, other_s=ocov_part2)
    assert ocov_both['n_samples'] == ocov_merged['n_samples']
    assert np.isclose(ocov_both['mean'], ocov_merged['mean']).all()
    assert np.isclose(ocov_both['pcov'], ocov_merged['pcov']).all()
if __name__=='__main__':
    # allow running the checks directly without pytest
    test_onlineempirical()
    test_merging()
| 39.196721 | 138 | 0.645337 |
import numpy as np
from precise.skaters.covariance.runempfactory import emp_pcov, merge_emp_scov
from precise.skatertools.syntheticdata.miscellaneous import create_correlated_dataset
from precise.skaters.covarianceutil.covfunctions import cov_to_corrcoef
from precise.skaters.covarianceutil.datacovfunctions import pcov_of_columns
# down to 1e-10
TOL = 1E-10
def test_onlineempirical():
data = create_correlated_dataset(100, (2.2, 4.4, 1.5), np.array([[0.2, 0.5, 0.7],[0.3, 0.2, 0.2],[0.5,0.3,0.1]]), (1, 5, 3))
np_corrcoef = np.corrcoef(data, rowvar=False)
s = {}
for j,x in enumerate(data[:2]):
s = emp_pcov(s=s, x=x, k=1)
if j>=1:
np_mean = np.mean(data[:j+1,:],axis=0)
np_pcov = np.cov(data[:j+1,:], rowvar=False, bias=True)
np_pcov2 = pcov_of_columns(data[:j + 1, :])
np_corrcoef = np.corrcoef(data[:j+1,:], rowvar=False)
ocorr = cov_to_corrcoef(s['pcov'])
assert np.isclose(np_pcov, s['pcov'], atol=TOL).all()
assert np.isclose(np_pcov2, s['pcov'], atol=TOL).all()
assert np.isclose(np_mean, s['mean'], atol=TOL).all()
assert np.isclose(np_corrcoef, ocorr, atol=TOL).all()
def test_merging():
data_part1 = create_correlated_dataset(500, (2.2, 4.4, 1.5), np.array([[0.2, 0.5, 0.7], [0.3, 0.2, 0.2], [0.5, 0.3, 0.1]]), (1, 5, 3))
data_part2 = create_correlated_dataset( \
1000, (5, 6, 2), np.array([[0.2, 0.5, 0.7], [0.3, 0.2, 0.2], [0.5, 0.3, 0.1]]), (1, 5, 3))
ocov_part1 = {}
ocov_part2 = {}
ocov_both = {}
for row in data_part1:
ocov_part1 = emp_pcov(s=ocov_part1, x=row)
ocov_both = emp_pcov(s=ocov_both, x=row)
for row in data_part2:
ocov_part2 = emp_pcov(s=ocov_part2, x=row)
ocov_both = emp_pcov(s=ocov_both, x=row)
ocov_merged = merge_emp_scov(s=ocov_part1, other_s=ocov_part2)
assert ocov_both['n_samples'] == ocov_merged['n_samples']
assert np.isclose(ocov_both['mean'], ocov_merged['mean']).all()
assert np.isclose(ocov_both['pcov'], ocov_merged['pcov']).all()
if __name__=='__main__':
test_onlineempirical()
test_merging()
| true | true |
1c3cc1203f4cac5f1b2b738f6bc37136744b4395 | 3,705 | py | Python | core/management/commands/maintenance.py | simpsonw/atmosphere | 3a5203ef0b563de3a0e8c8c8715df88186532d7a | [
"BSD-3-Clause"
] | 197 | 2016-12-08T02:33:32.000Z | 2022-03-23T14:27:47.000Z | core/management/commands/maintenance.py | simpsonw/atmosphere | 3a5203ef0b563de3a0e8c8c8715df88186532d7a | [
"BSD-3-Clause"
] | 385 | 2017-01-03T22:51:46.000Z | 2020-12-16T16:20:42.000Z | core/management/commands/maintenance.py | benlazarine/atmosphere | 38fad8e4002e510e8b4294f2bb5bc75e8e1817fa | [
"BSD-3-Clause"
] | 50 | 2016-12-08T08:32:25.000Z | 2021-12-10T00:21:39.000Z | import os
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils import timezone
from dateutil.parser import parse
from core.models import MaintenanceRecord
from atmosphere.version import git_branch
class Command(BaseCommand):
    """Management command to start, stop and show maintenance records."""
    help = 'Allows starting and stopping maintenance'
    def add_arguments(self, parser):
        """Declare the positional sub-command and its options."""
        # defaults are resolved once, when the parser is constructed
        default_title = _default_title()
        default_message = _default_message()
        default_start_date = timezone.localtime()
        parser.add_argument("command", help="commands: start, stop, show")
        parser.add_argument(
            "--title",
            default=default_title,
            help="Title of maintenance record"
        )
        parser.add_argument(
            "--message",
            default=default_message,
            help="Use this as the message of maintenance record"
        )
        parser.add_argument(
            "--start-date",
            default=default_start_date,
            help="Start date of maintenance record, default is now. Many "
            "time formats are accepted. Use --dry-run to ensure "
            "correct time."
        )
        parser.add_argument(
            "--dry-run",
            action="store_true",
            default=False,
            help="Only print what would occur"
        )
    def handle_start(self, **options):
        """Create (or, with --dry-run, preview) a new maintenance record."""
        start_date = options['start_date']
        # the default is already a datetime; only CLI-provided values are strings
        if isinstance(start_date, str):
            try:
                start_date = parse(start_date)
            except Exception as exc:
                raise CommandError("Error parsing start_date: {}".format(exc))
        record = MaintenanceRecord(
            title=options['title'],
            message=options['message'],
            start_date=start_date
        )
        if options['dry_run']:
            self.stdout.write(
                "{}: {}".format(self.style.NOTICE("Dry run"), record)
            )
        else:
            record.save()
            self.stdout.write(
                "{}: {}".format(self.style.SUCCESS("Record created"), record)
            )
    def handle_stop(self, **options):
        """End-date all currently active maintenance records."""
        records = MaintenanceRecord.active()
        if not records:
            self.stdout.write("There are no active records")
            return
        for record in records:
            # set on the in-memory object only; persisted below unless dry run
            record.end_date = timezone.now()
            if options['dry_run']:
                self.stdout.write(
                    "{}: {}".format(self.style.NOTICE("Dry run"), record)
                )
                continue
            else:
                record.save()
                self.stdout.write(
                    "{}: {}".format(
                        self.style.SUCCESS("Record enddated"), record
                    )
                )
    def handle_show(self, **options):
        """Print all currently active maintenance records."""
        records = MaintenanceRecord.active()
        if not records:
            self.stdout.write("There are no active records")
            return
        for record in records:
            self.stdout.write(str(record))
    def handle(self, **options):
        """Dispatch to handle_<command>; unknown commands raise CommandError."""
        cmd = options['command']
        handler = getattr(self, "handle_{}".format(cmd), _raise_unknown)
        handler(**options)
def _default_title():
    """Build a default record title like '5/12 (master) Maintenance'.

    The branch name is read from the project's own git checkout under
    ``settings.PROJECT_ROOT``.
    """
    now = timezone.localdate()
    git_directory = os.path.join(settings.PROJECT_ROOT, ".git")
    branch_name = git_branch(git_directory=git_directory)
    return "{0}/{1} ({2}) Maintenance".format(now.month, now.day, branch_name)
def _default_message():
return "Atmosphere is down for a Scheduled Maintenance"
def _raise_unknown(*args, **options):
    """Fallback handler used when no handle_<command> method exists."""
    unknown = options['command']
    raise CommandError("Unknown command: {}".format(unknown))
| 31.134454 | 78 | 0.57085 | import os
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils import timezone
from dateutil.parser import parse
from core.models import MaintenanceRecord
from atmosphere.version import git_branch
class Command(BaseCommand):
help = 'Allows starting and stopping maintenance'
def add_arguments(self, parser):
default_title = _default_title()
default_message = _default_message()
default_start_date = timezone.localtime()
parser.add_argument("command", help="commands: start, stop, show")
parser.add_argument(
"--title",
default=default_title,
help="Title of maintenance record"
)
parser.add_argument(
"--message",
default=default_message,
help="Use this as the message of maintenance record"
)
parser.add_argument(
"--start-date",
default=default_start_date,
help="Start date of maintenance record, default is now. Many "
"time formats are accepted. Use --dry-run to ensure "
"correct time."
)
parser.add_argument(
"--dry-run",
action="store_true",
default=False,
help="Only print what would occur"
)
def handle_start(self, **options):
start_date = options['start_date']
if isinstance(start_date, str):
try:
start_date = parse(start_date)
except Exception as exc:
raise CommandError("Error parsing start_date: {}".format(exc))
record = MaintenanceRecord(
title=options['title'],
message=options['message'],
start_date=start_date
)
if options['dry_run']:
self.stdout.write(
"{}: {}".format(self.style.NOTICE("Dry run"), record)
)
else:
record.save()
self.stdout.write(
"{}: {}".format(self.style.SUCCESS("Record created"), record)
)
def handle_stop(self, **options):
records = MaintenanceRecord.active()
if not records:
self.stdout.write("There are no active records")
return
for record in records:
record.end_date = timezone.now()
if options['dry_run']:
self.stdout.write(
"{}: {}".format(self.style.NOTICE("Dry run"), record)
)
continue
else:
record.save()
self.stdout.write(
"{}: {}".format(
self.style.SUCCESS("Record enddated"), record
)
)
def handle_show(self, **options):
records = MaintenanceRecord.active()
if not records:
self.stdout.write("There are no active records")
return
for record in records:
self.stdout.write(str(record))
def handle(self, **options):
cmd = options['command']
handler = getattr(self, "handle_{}".format(cmd), _raise_unknown)
handler(**options)
def _default_title():
now = timezone.localdate()
git_directory = os.path.join(settings.PROJECT_ROOT, ".git")
branch_name = git_branch(git_directory=git_directory)
return "{0}/{1} ({2}) Maintenance".format(now.month, now.day, branch_name)
def _default_message():
return "Atmosphere is down for a Scheduled Maintenance"
def _raise_unknown(*args, **options):
cmd = options['command']
raise CommandError("Unknown command: {}".format(cmd))
| true | true |
1c3cc1e31cf57871ffbc2ad2a85ff375039f4f9c | 821 | py | Python | files_utils.py | acanakoglu/GeCo_5.0 | a67d892e9a43c2931517883f60621c4b4f6cc0d0 | [
"Apache-2.0"
] | null | null | null | files_utils.py | acanakoglu/GeCo_5.0 | a67d892e9a43c2931517883f60621c4b4f6cc0d0 | [
"Apache-2.0"
] | null | null | null | files_utils.py | acanakoglu/GeCo_5.0 | a67d892e9a43c2931517883f60621c4b4f6cc0d0 | [
"Apache-2.0"
] | null | null | null | import os
def get_file_name(path):
return path.split('/')[-1]
def list_files(directory):
files = os.listdir(directory)
files = [os.path.join(directory, f) for f in files]
return files
def get_files_triple(directory):
ls_list = list_files(directory)
meta_set = set(filter(lambda x: x.endswith("meta"), ls_list))
return [(get_file_name(x), x, x + ".meta") for x in ls_list if x + ".meta" in meta_set]
# def get_schema_path(ls_list):
# return next(filter(lambda x: x.endswith("schema.xml"), ls_list)) # possibly test.schema
# def parse_schema(schema_path):
# # schema_path = get_schema_path(ls_list)
# with hdfs.open(schema_path) as f:
# tree = ET.parse(f)
# return [(x.text, x.get('type')) for x in tree.getiterator() if x.tag.endswith("field")]
#
#
#
| 25.65625 | 97 | 0.65408 | import os
def get_file_name(path):
return path.split('/')[-1]
def list_files(directory):
files = os.listdir(directory)
files = [os.path.join(directory, f) for f in files]
return files
def get_files_triple(directory):
ls_list = list_files(directory)
meta_set = set(filter(lambda x: x.endswith("meta"), ls_list))
return [(get_file_name(x), x, x + ".meta") for x in ls_list if x + ".meta" in meta_set]
| true | true |
1c3cc1f7f249638025ecc80ed694aa3e0cad6b1e | 1,068 | py | Python | PythonDownload/pythonexercicios/ex059.py | GitGuii/PythonExs | afab77b311d23f7ed88d94e9ce927653cf648b29 | [
"MIT"
] | 1 | 2021-08-10T15:00:34.000Z | 2021-08-10T15:00:34.000Z | PythonDownload/pythonexercicios/ex059.py | GitGuii/PythonExs | afab77b311d23f7ed88d94e9ce927653cf648b29 | [
"MIT"
] | null | null | null | PythonDownload/pythonexercicios/ex059.py | GitGuii/PythonExs | afab77b311d23f7ed88d94e9ce927653cf648b29 | [
"MIT"
] | null | null | null | n1 = int(input("Digite o primeiro numero: "))
n2 = int(input("Digite o segundo numero: "))
maior = 0
menu = 9
while menu != 0:
menu = int(input('''Digite o numero da opção desejada:
1) Soma
2) Multiplicar
3) maior
4) trocar numeros
0) sair '''))
if menu == 1:
print("a soma entre {} e {} é de".format(n1, n2), n1+n2)
print("-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
elif menu == 2:
print("Multiplicação entre {} e {} é de".format(n1, n2), n1*n2)
print("-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
elif menu == 3:
if n1 > n2:
maior = n1
else:
maior = n2
print("o maior numero é {}".format(maior))
print("-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
elif menu == 4:
n1 = int(input("Digite o primeiro numero para efetuar a troca: "))
n2 = int(input("Digite o segundo numero para efetuar a troca: "))
print("-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
print("Fim do programa Obrigado!")
| 33.375 | 74 | 0.43633 | n1 = int(input("Digite o primeiro numero: "))
n2 = int(input("Digite o segundo numero: "))
maior = 0
menu = 9
while menu != 0:
menu = int(input('''Digite o numero da opção desejada:
1) Soma
2) Multiplicar
3) maior
4) trocar numeros
0) sair '''))
if menu == 1:
print("a soma entre {} e {} é de".format(n1, n2), n1+n2)
print("-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
elif menu == 2:
print("Multiplicação entre {} e {} é de".format(n1, n2), n1*n2)
print("-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
elif menu == 3:
if n1 > n2:
maior = n1
else:
maior = n2
print("o maior numero é {}".format(maior))
print("-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
elif menu == 4:
n1 = int(input("Digite o primeiro numero para efetuar a troca: "))
n2 = int(input("Digite o segundo numero para efetuar a troca: "))
print("-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
print("Fim do programa Obrigado!")
| true | true |
1c3cc22d352a48c21060efd3f5f8e9683881ae8d | 3,546 | py | Python | 02_tensor_basics.py | KOPFYF/pytorchTutorial | 4ed7642049a0fba46edd505a23ffcea9d8e03679 | [
"MIT"
] | null | null | null | 02_tensor_basics.py | KOPFYF/pytorchTutorial | 4ed7642049a0fba46edd505a23ffcea9d8e03679 | [
"MIT"
] | null | null | null | 02_tensor_basics.py | KOPFYF/pytorchTutorial | 4ed7642049a0fba46edd505a23ffcea9d8e03679 | [
"MIT"
] | null | null | null | import re
import torch
# Everything in pytorch is based on Tensor operations.
# A tensor can have different dimensions
# so it can be 1d, 2d, or even 3d and higher
# scalar, vector, matrix, tensor
# torch.empty(size): uninitialized
x = torch.empty(1) # scalar
print(x)
x = torch.empty(3) # vector, 1D
print(x)
x = torch.empty(2,3) # matrix, 2D
print(x)
x = torch.empty(2,2,3) # tensor, 3 dimensions
#x = torch.empty(2,2,2,3) # tensor, 4 dimensions
print(x)
# torch.rand(size): random numbers [0, 1]
x = torch.rand(5, 3)
print(x)
# torch.zeros(size), fill with 0
# torch.ones(size), fill with 1
x = torch.zeros(5, 3)
print(x)
# check size
print(x.size())
# check data type
print(x.dtype) # float32 is the default dtype
# specify types, float32 default
x = torch.zeros(5, 3, dtype=torch.int)
x = torch.zeros(5, 3, dtype=torch.double)
x = torch.zeros(5, 3, dtype=torch.float16)
print(x)
# check type
print(x.dtype)
# construct from data, list
x = torch.tensor([5.5, 3])
print(x.size())
# requires_grad argument
# This will tell pytorch that it will need to calculate the gradients for this tensor
# later in your optimization steps
# i.e. this is a variable in your model that you want to optimize
x = torch.tensor([5.5, 3], requires_grad=True)
# Operations
y = torch.rand(2, 2)
x = torch.rand(2, 2)
# elementwise addition
z = x + y
z = torch.add(x,y) # same thing
# in place addition, everything with a trailing underscore is an inplace operation
# i.e. it will modify the variable
# y.add_(x)
# subtraction
z = x - y
z = torch.sub(x, y)
# multiplication
z = x * y
z = torch.mul(x,y)
# division
z = x / y
z = torch.div(x,y)
# Slicing
x = torch.rand(5,3)
print(x)
print(x[:, 0]) # all rows, column 0
print(x[1, :]) # row 1, all columns
print(x[1, 1]) # element at 1, 1
# Get the actual value if only 1 element in your tensor
print('item:', x[1,1].item())
# Reshape with torch.view()
x = torch.randn(4, 4)
y = x.view(16) # 1 dim
z = x.view(-1, 8) # the size -1 is inferred from other dimensions
# if -1, pytorch will automatically determine the necessary size
print(x.size(), y.size(), z.size()) # torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])
# Numpy
# Converting a Torch Tensor to a NumPy array and vice versa is very easy
a = torch.ones(5)
print(a) # tensor([1., 1., 1., 1., 1.])
# torch to numpy with .numpy(), shallow copy, same address
b = a.numpy()
print(b) # [1. 1. 1. 1. 1.]
print(type(b)) # <class 'numpy.ndarray'>
# Careful: If the Tensor is on the CPU (not the GPU),
# both objects will share the same memory location, so changing one
# will also change the other
a.add_(1)
print(a)
print(b) # b changed as well
# numpy to torch with .from_numpy(x)
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a) # shallow copy again! check GPU
print(a)
print(b)
# again be careful when modifying
a += 1
print(a)
print(b)
# by default all tensors are created on the CPU,
# but you can also move them to the GPU (only if a GPU is available)
if torch.cuda.is_available():
    device = torch.device("cuda")          # a CUDA device object
    y = torch.ones_like(x, device=device)  # directly create a tensor on **GPU**
    x = x.to(device)                       # or just use strings ``.to("cuda")``
    z = x + y
    # z = z.numpy() # not possible because numpy cannot handle GPU tensors
    # move to CPU again
    z.to("cpu")       # ``.to`` can also change dtype together!
    # z = z.numpy()
x = torch.ones(5, requires_grad=True) # default requires_grad is False
print(x) # tensor([1., 1., 1., 1., 1.], requires_grad=True)
| 25.148936 | 92 | 0.663001 | import re
# Comment-stripped copy of 02_tensor_basics.py: tensor creation, arithmetic,
# slicing, reshaping, and numpy interop.
# NOTE(review): the dump had lost "print(x)" and the "x = torch." prefix
# around the rand() call (leaving a dangling "rand(5, 3)" NameError);
# both lines are restored below from the commented copy of this file.
import torch
x = torch.empty(1)
print(x)
x = torch.empty(3)
print(x)
x = torch.empty(2,3)
print(x)
x = torch.empty(2,2,3)
print(x)
x = torch.rand(5, 3)
print(x)
x = torch.zeros(5, 3)
print(x)
print(x.size())
print(x.dtype)
x = torch.zeros(5, 3, dtype=torch.int)
x = torch.zeros(5, 3, dtype=torch.double)
x = torch.zeros(5, 3, dtype=torch.float16)
print(x)
print(x.dtype)
x = torch.tensor([5.5, 3])
print(x.size())
x = torch.tensor([5.5, 3], requires_grad=True)
y = torch.rand(2, 2)
x = torch.rand(2, 2)
z = x + y
z = torch.add(x,y)
z = x - y
z = torch.sub(x, y)
z = x * y
z = torch.mul(x,y)
z = x / y
z = torch.div(x,y)
x = torch.rand(5,3)
print(x)
print(x[:, 0])
print(x[1, :])
print(x[1, 1])
print('item:', x[1,1].item())
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)
print(x.size(), y.size(), z.size())
a = torch.ones(5)
print(a)
b = a.numpy()
print(b)
print(type(b))
a.add_(1)
print(a)
print(b)
import numpy as np
a = np.ones(5)
b = torch.from_numpy(a)
print(a)
print(b)
a += 1
print(a)
print(b)
if torch.cuda.is_available():
    device = torch.device("cuda") # a CUDA device object
    y = torch.ones_like(x, device=device) # directly create a tensor on **GPU**
    x = x.to(device) # or just use strings ``.to("cuda")``
    z = x + y
    # z = z.numpy() # not possible because numpy cannot handle GPU tenors
    # move to CPU again
    z.to("cpu") # ``.to`` can also change dtype together!
    # z = z.numpy()
x = torch.ones(5, requires_grad=True) # default requires_grad is False
print(x) # tensor([1., 1., 1., 1., 1.], requires_grad=True)
| true | true |
1c3cc3606f983f7d09ed842ef58cc248742babf5 | 311 | py | Python | tests/benchmarks/test_parser.py | melvinkcx/graphql-core-next | b320331faf2fc2f4f1f6a1366f07109d1bdd44f1 | [
"MIT"
] | null | null | null | tests/benchmarks/test_parser.py | melvinkcx/graphql-core-next | b320331faf2fc2f4f1f6a1366f07109d1bdd44f1 | [
"MIT"
] | null | null | null | tests/benchmarks/test_parser.py | melvinkcx/graphql-core-next | b320331faf2fc2f4f1f6a1366f07109d1bdd44f1 | [
"MIT"
] | null | null | null | from graphql import parse, DocumentNode
# noinspection PyUnresolvedReferences
from ..fixtures import kitchen_sink_query # noqa: F401
def test_parse_kitchen_sink(benchmark, kitchen_sink_query): # noqa: F811
    """Benchmark parsing the kitchen-sink query and sanity-check the result type."""
    parsed = benchmark(lambda: parse(kitchen_sink_query))
    assert isinstance(parsed, DocumentNode)
| 31.1 | 73 | 0.800643 | from graphql import parse, DocumentNode
from ..fixtures import kitchen_sink_query
def test_parse_kitchen_sink(benchmark, kitchen_sink_query):
query = benchmark(lambda: parse(kitchen_sink_query))
assert isinstance(query, DocumentNode)
| true | true |
1c3cc43bdda11d873ff21ac3eefb3df35e6d0679 | 4,718 | py | Python | src/pyjion/__init__.py | FasterSpeeding/Pyjion | 137fbaa6dd68e17ffbeba076a0ce31dbde5df218 | [
"MIT"
] | null | null | null | src/pyjion/__init__.py | FasterSpeeding/Pyjion | 137fbaa6dd68e17ffbeba076a0ce31dbde5df218 | [
"MIT"
] | null | null | null | src/pyjion/__init__.py | FasterSpeeding/Pyjion | 137fbaa6dd68e17ffbeba076a0ce31dbde5df218 | [
"MIT"
] | null | null | null | import ctypes
import pathlib
import os
import platform
from enum import IntFlag, IntEnum
from dataclasses import dataclass
__version__ = '1.1.0'
def _no_dotnet(path):
raise ImportError(f"Can't find a .NET 6 installation in {path}, "
"provide the DOTNET_ROOT environment variable "
"if it's installed somewhere unusual")
def _which_dotnet() -> str:
    """
    Locate the clrjit library path
    """
    # Explicit overrides first: DOTNET_ROOT picks the install root,
    # DOTNET_LIB_PATH points straight at the clrjit shared library and
    # short-circuits the per-platform search entirely.
    _dotnet_root = None
    if 'DOTNET_ROOT' in os.environ:
        _dotnet_root = pathlib.Path(os.environ['DOTNET_ROOT'])
        if not _dotnet_root.exists():
            _no_dotnet(_dotnet_root)
    if 'DOTNET_LIB_PATH' in os.environ:
        ctypes.cdll.LoadLibrary(os.environ['DOTNET_LIB_PATH'])
        return os.environ['DOTNET_LIB_PATH']
    if platform.system() == "Darwin":
        # Default macOS install root used by the official installer.
        if not _dotnet_root:
            _dotnet_root = pathlib.Path('/usr/local/share/dotnet/')
        if not _dotnet_root.exists():
            _no_dotnet(_dotnet_root)
        # NOTE(review): glob order is filesystem order, so with several 6.0.x
        # patch levels installed an arbitrary one is picked — confirm intended.
        lib_path = list(_dotnet_root.glob('shared/Microsoft.NETCore.App*/6.0.*/libclrjit.dylib'))
        if len(lib_path) > 0:
            clrjitlib = str(lib_path[0])
            ctypes.cdll.LoadLibrary(clrjitlib)
            return clrjitlib
        else:
            _no_dotnet(_dotnet_root)
    elif platform.system() == "Linux":
        if not _dotnet_root:
            search_paths = [pathlib.Path('/usr/local/share/dotnet/'), pathlib.Path('/usr/share/dotnet/')]
            # NOTE(review): no break here — if both candidates exist, the
            # LAST existing path (/usr/share/dotnet/) wins.
            for path in search_paths:
                if not path.exists():
                    continue
                else:
                    _dotnet_root = path
        if not _dotnet_root:
            _no_dotnet(_dotnet_root)
        lib_path = list(_dotnet_root.glob('shared/Microsoft.NETCore.App*/6.0.*/libclrjit.so'))
        if len(lib_path) > 0:
            clrjitlib = str(lib_path[0])
            ctypes.cdll.LoadLibrary(clrjitlib)
            return clrjitlib
        else:
            _no_dotnet(_dotnet_root)
    elif platform.system() == "Windows":
        # %ProgramFiles%\dotnet is the default Windows install root.
        if not _dotnet_root:
            _dotnet_root = pathlib.WindowsPath(os.path.expandvars(r'%ProgramFiles%\dotnet'))
        if not _dotnet_root.exists():
            _no_dotnet(_dotnet_root)
        lib_path = list(_dotnet_root.glob('shared/Microsoft.NETCore.App*/6.0.*/clrjit.dll'))
        if len(lib_path) > 0:
            clrjitlib = str(lib_path[0])
            ctypes.cdll.LoadLibrary(clrjitlib)
            return clrjitlib
        else:
            _no_dotnet(_dotnet_root)
    else:
        raise ValueError("Operating System not Supported")
lib_path = _which_dotnet()
try:
from ._pyjion import enable, disable, info as _info, il, native, offsets, \
graph, init as _init, symbols, config, PyjionUnboxingError
_init(lib_path)
except ImportError:
raise ImportError(
"""
Failed to import the compiled Pyjion module. This normally means something went wrong during pip install
and the binaries weren't compiled. Make sure you update pip before installing to get the right wheel.
If that doesn't work, run pip in verbose mode, or file an issue at https://github.com/tonybaloney/pyjion/.
"""
)
class OptimizationFlags(IntFlag):
    """Bit flags selecting individual Pyjion optimizations; combine with ``|``."""
    InlineIs = 1 << 0
    InlineDecref = 1 << 1
    InternRichCompare = 1 << 2
    InlineFramePushPop = 1 << 3
    KnownStoreSubscr = 1 << 4
    KnownBinarySubscr = 1 << 5
    InlineIterators = 1 << 6
    HashedNames = 1 << 7
    BuiltinMethods = 1 << 8
    TypeSlotLookups = 1 << 9
    FunctionCalls = 1 << 10
    LoadAttr = 1 << 11
    Unboxing = 1 << 12
    IsNone = 1 << 13
    IntegerUnboxingMultiply = 1 << 14
    OptimisticIntegers = 1 << 15
class CompilationResult(IntEnum):
    """Outcome codes reported for a frame submitted to the JIT.

    0/1 are terminal states, 1x are compiler failures, 1xx mark frames the
    JIT refuses to compile. (The original had trailing commas on the first
    two members, which made their raw values 1-tuples and silently relied
    on IntEnum's tuple-unpacking; the integer values are unchanged.)
    """
    NoResult = 0
    Success = 1
    CompilationException = 10
    CompilationJitFailure = 11
    CompilationStackEffectFault = 12
    IncompatibleCompilerFlags = 100
    IncompatibleSize = 101
    IncompatibleOpcode_Yield = 102
    IncompatibleOpcode_WithExcept = 103
    IncompatibleOpcode_With = 104
    IncompatibleOpcode_Unknown = 110
    IncompatibleFrameGlobal = 120
class PgcStatus(IntEnum):
    """Profile-guided-compilation stage of a frame (reported via info())."""
    Uncompiled = 0
    CompiledWithProbes = 1
    Optimized = 2
@dataclass()
class JitInfo:
    """Structured snapshot of the JIT state for one function (built by info())."""
    failed: bool                        # compilation failed
    compile_result: CompilationResult   # detailed outcome code
    compiled: bool                      # machine code was emitted
    optimizations: OptimizationFlags    # optimization passes enabled
    pgc: PgcStatus                      # profile-guided compilation stage
    run_count: int                      # times the frame has run — TODO confirm exact meaning
    tracing: bool                       # tracing hooks active
    profiling: bool                     # profiling hooks active
def info(f) -> JitInfo:
    """Return the JIT state of *f* as a structured JitInfo record."""
    raw = _info(f)
    return JitInfo(
        failed=raw['failed'],
        compile_result=CompilationResult(raw['compile_result']),
        compiled=raw['compiled'],
        optimizations=OptimizationFlags(raw['optimizations']),
        pgc=PgcStatus(raw['pgc']),
        run_count=raw['run_count'],
        tracing=raw['tracing'],
        profiling=raw['profiling'],
    )
| 30.836601 | 106 | 0.630352 | import ctypes
import pathlib
import os
import platform
from enum import IntFlag, IntEnum
from dataclasses import dataclass
__version__ = '1.1.0'
def _no_dotnet(path):
raise ImportError(f"Can't find a .NET 6 installation in {path}, "
"provide the DOTNET_ROOT environment variable "
"if it's installed somewhere unusual")
def _which_dotnet() -> str:
_dotnet_root = None
if 'DOTNET_ROOT' in os.environ:
_dotnet_root = pathlib.Path(os.environ['DOTNET_ROOT'])
if not _dotnet_root.exists():
_no_dotnet(_dotnet_root)
if 'DOTNET_LIB_PATH' in os.environ:
ctypes.cdll.LoadLibrary(os.environ['DOTNET_LIB_PATH'])
return os.environ['DOTNET_LIB_PATH']
if platform.system() == "Darwin":
if not _dotnet_root:
_dotnet_root = pathlib.Path('/usr/local/share/dotnet/')
if not _dotnet_root.exists():
_no_dotnet(_dotnet_root)
lib_path = list(_dotnet_root.glob('shared/Microsoft.NETCore.App*/6.0.*/libclrjit.dylib'))
if len(lib_path) > 0:
clrjitlib = str(lib_path[0])
ctypes.cdll.LoadLibrary(clrjitlib)
return clrjitlib
else:
_no_dotnet(_dotnet_root)
elif platform.system() == "Linux":
if not _dotnet_root:
search_paths = [pathlib.Path('/usr/local/share/dotnet/'), pathlib.Path('/usr/share/dotnet/')]
for path in search_paths:
if not path.exists():
continue
else:
_dotnet_root = path
if not _dotnet_root:
_no_dotnet(_dotnet_root)
lib_path = list(_dotnet_root.glob('shared/Microsoft.NETCore.App*/6.0.*/libclrjit.so'))
if len(lib_path) > 0:
clrjitlib = str(lib_path[0])
ctypes.cdll.LoadLibrary(clrjitlib)
return clrjitlib
else:
_no_dotnet(_dotnet_root)
elif platform.system() == "Windows":
if not _dotnet_root:
_dotnet_root = pathlib.WindowsPath(os.path.expandvars(r'%ProgramFiles%\dotnet'))
if not _dotnet_root.exists():
_no_dotnet(_dotnet_root)
lib_path = list(_dotnet_root.glob('shared/Microsoft.NETCore.App*/6.0.*/clrjit.dll'))
if len(lib_path) > 0:
clrjitlib = str(lib_path[0])
ctypes.cdll.LoadLibrary(clrjitlib)
return clrjitlib
else:
_no_dotnet(_dotnet_root)
else:
raise ValueError("Operating System not Supported")
lib_path = _which_dotnet()
try:
from ._pyjion import enable, disable, info as _info, il, native, offsets, \
graph, init as _init, symbols, config, PyjionUnboxingError
_init(lib_path)
except ImportError:
raise ImportError(
"""
Failed to import the compiled Pyjion module. This normally means something went wrong during pip install
and the binaries weren't compiled. Make sure you update pip before installing to get the right wheel.
If that doesn't work, run pip in verbose mode, or file an issue at https://github.com/tonybaloney/pyjion/.
"""
)
class OptimizationFlags(IntFlag):
InlineIs = 1
InlineDecref = 2
InternRichCompare = 4
InlineFramePushPop = 8
KnownStoreSubscr = 16
KnownBinarySubscr = 32
InlineIterators = 64
HashedNames = 128
BuiltinMethods = 256
TypeSlotLookups = 512
FunctionCalls = 1024
LoadAttr = 2048
Unboxing = 4096
IsNone = 8192
IntegerUnboxingMultiply = 16384
OptimisticIntegers = 32768
class CompilationResult(IntEnum):
NoResult = 0,
Success = 1,
CompilationException = 10
CompilationJitFailure = 11
CompilationStackEffectFault = 12
IncompatibleCompilerFlags = 100
IncompatibleSize = 101
IncompatibleOpcode_Yield = 102
IncompatibleOpcode_WithExcept = 103
IncompatibleOpcode_With = 104
IncompatibleOpcode_Unknown = 110
IncompatibleFrameGlobal = 120
class PgcStatus(IntEnum):
Uncompiled = 0
CompiledWithProbes = 1
Optimized = 2
@dataclass()
class JitInfo:
failed: bool
compile_result: CompilationResult
compiled: bool
optimizations: OptimizationFlags
pgc: PgcStatus
run_count: int
tracing: bool
profiling: bool
def info(f) -> JitInfo:
d = _info(f)
return JitInfo(d['failed'],
CompilationResult(d['compile_result']),
d['compiled'],
OptimizationFlags(d['optimizations']),
PgcStatus(d['pgc']),
d['run_count'],
d['tracing'],
d['profiling'])
| true | true |
1c3cc43e8525d4a35a4756127840bfc8d81b66a3 | 455 | py | Python | restaurant_project/menu/tests/test_urls.py | lukart80/restaurant | 419786cd87a7bd15c82b2fda8ad7c5e3e1f6c9cd | [
"MIT"
] | null | null | null | restaurant_project/menu/tests/test_urls.py | lukart80/restaurant | 419786cd87a7bd15c82b2fda8ad7c5e3e1f6c9cd | [
"MIT"
] | null | null | null | restaurant_project/menu/tests/test_urls.py | lukart80/restaurant | 419786cd87a7bd15c82b2fda8ad7c5e3e1f6c9cd | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
class TestMenu(TestCase):
    """Smoke-test that public menu URLs answer with the expected status codes."""
    HOMEPAGE_URL = '/'

    def setUp(self):
        # Unauthenticated client: the menu must be reachable without login.
        self.anonymous_client = Client()

    def test_menu_urls(self):
        expected_status = {
            self.HOMEPAGE_URL: 200,
        }
        for url, status in expected_status.items():
            with self.subTest(url=url):
                self.assertEqual(self.anonymous_client.get(url).status_code, status)
| 25.277778 | 60 | 0.595604 | from django.test import TestCase, Client
class TestMenu(TestCase):
HOMEPAGE_URL = '/'
def setUp(self):
self.anonymous_client = Client()
def test_menu_urls(self):
url_code = {
self.HOMEPAGE_URL: 200,
}
for url, code in url_code.items():
with self.subTest(url=url):
response = self.anonymous_client.get(url)
self.assertEqual(response.status_code, code)
| true | true |
1c3cc6dd770ac281c96d7df5ddf2b74446b08133 | 2,549 | py | Python | ml-agents/mlagents/envs/communicator_objects/unity_rl_initialization_input_pb2.py | apstwilly/ml-agents | d3f9fd63043f1c82790d3fe35ee07dc5ed1232b9 | [
"Apache-2.0"
] | 33 | 2018-09-04T12:10:49.000Z | 2022-02-05T03:27:40.000Z | ml-agents/mlagents/envs/communicator_objects/unity_rl_initialization_input_pb2.py | Beinger/ml-agents | d3f9fd63043f1c82790d3fe35ee07dc5ed1232b9 | [
"Apache-2.0"
] | 1 | 2022-02-05T03:51:16.000Z | 2022-02-06T22:48:42.000Z | ml-agents/mlagents/envs/communicator_objects/unity_rl_initialization_input_pb2.py | Beinger/ml-agents | d3f9fd63043f1c82790d3fe35ee07dc5ed1232b9 | [
"Apache-2.0"
] | 3 | 2019-03-20T05:00:43.000Z | 2020-01-27T16:53:38.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mlagents/envs/communicator_objects/unity_rl_initialization_input.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='mlagents/envs/communicator_objects/unity_rl_initialization_input.proto',
package='communicator_objects',
syntax='proto3',
serialized_pb=_b('\nFmlagents/envs/communicator_objects/unity_rl_initialization_input.proto\x12\x14\x63ommunicator_objects\"*\n\x1aUnityRLInitializationInput\x12\x0c\n\x04seed\x18\x01 \x01(\x05\x42\x1f\xaa\x02\x1cMLAgents.CommunicatorObjectsb\x06proto3')
)
_UNITYRLINITIALIZATIONINPUT = _descriptor.Descriptor(
name='UnityRLInitializationInput',
full_name='communicator_objects.UnityRLInitializationInput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='seed', full_name='communicator_objects.UnityRLInitializationInput.seed', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=96,
serialized_end=138,
)
DESCRIPTOR.message_types_by_name['UnityRLInitializationInput'] = _UNITYRLINITIALIZATIONINPUT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UnityRLInitializationInput = _reflection.GeneratedProtocolMessageType('UnityRLInitializationInput', (_message.Message,), dict(
DESCRIPTOR = _UNITYRLINITIALIZATIONINPUT,
__module__ = 'mlagents.envs.communicator_objects.unity_rl_initialization_input_pb2'
# @@protoc_insertion_point(class_scope:communicator_objects.UnityRLInitializationInput)
))
_sym_db.RegisterMessage(UnityRLInitializationInput)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\252\002\034MLAgents.CommunicatorObjects'))
# @@protoc_insertion_point(module_scope)
| 35.402778 | 256 | 0.804629 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='mlagents/envs/communicator_objects/unity_rl_initialization_input.proto',
package='communicator_objects',
syntax='proto3',
serialized_pb=_b('\nFmlagents/envs/communicator_objects/unity_rl_initialization_input.proto\x12\x14\x63ommunicator_objects\"*\n\x1aUnityRLInitializationInput\x12\x0c\n\x04seed\x18\x01 \x01(\x05\x42\x1f\xaa\x02\x1cMLAgents.CommunicatorObjectsb\x06proto3')
)
_UNITYRLINITIALIZATIONINPUT = _descriptor.Descriptor(
name='UnityRLInitializationInput',
full_name='communicator_objects.UnityRLInitializationInput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='seed', full_name='communicator_objects.UnityRLInitializationInput.seed', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=96,
serialized_end=138,
)
DESCRIPTOR.message_types_by_name['UnityRLInitializationInput'] = _UNITYRLINITIALIZATIONINPUT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
UnityRLInitializationInput = _reflection.GeneratedProtocolMessageType('UnityRLInitializationInput', (_message.Message,), dict(
DESCRIPTOR = _UNITYRLINITIALIZATIONINPUT,
__module__ = 'mlagents.envs.communicator_objects.unity_rl_initialization_input_pb2'
# @@protoc_insertion_point(class_scope:communicator_objects.UnityRLInitializationInput)
))
_sym_db.RegisterMessage(UnityRLInitializationInput)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\252\002\034MLAgents.CommunicatorObjects'))
# @@protoc_insertion_point(module_scope)
| true | true |
1c3cc7805aeb74c829cf09c428d6456723b0ef14 | 3,754 | py | Python | tests/test_util.py | nickfrostatx/frost-ci | 97fc234eb174a1242481b40e56aebba595827a69 | [
"MIT"
] | null | null | null | tests/test_util.py | nickfrostatx/frost-ci | 97fc234eb174a1242481b40e56aebba595827a69 | [
"MIT"
] | null | null | null | tests/test_util.py | nickfrostatx/frost-ci | 97fc234eb174a1242481b40e56aebba595827a69 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Test utility functions."""
import flask
import frost.util
import json
import pytest
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
def test_check_state():
    """check_state must 403 any request whose ``state`` param != the session CSRF token."""
    app = flask.Flask(__name__)

    # Fake session layer that always reports a fixed CSRF token.
    class FakeSessionInterface(flask.sessions.SessionInterface):
        def open_session(self, app, request):
            return {'csrf': 'somecsrf'}
        def save_session(self, app, session, response):
            pass
    app.session_interface = FakeSessionInterface()

    @app.route('/')
    @frost.util.check_state
    def home():
        return 'abc'

    with app.test_client() as client:
        # Missing, empty, and wrong tokens are all rejected.
        rv = client.get('/')
        assert rv.status_code == 403
        rv = client.get('/?state=')
        assert rv.status_code == 403
        rv = client.get('/?state=fake')
        assert rv.status_code == 403
        # The matching token is accepted.
        rv = client.get('/?state=somecsrf')
        assert rv.status_code == 200
def test_is_safe_url_absolute():
    """is_safe_url(url, False) must only accept absolute http URLs on this host."""
    app = flask.Flask(__name__)

    @app.route('/url')
    def home():
        safe = False
        if flask.request.referrer:
            safe = frost.util.is_safe_url(flask.request.referrer, False)
        return flask.jsonify({'safe': safe})

    with app.test_client() as client:
        # Helper: hit /url with the given Referer and return the reported verdict.
        def is_safe(referrer=None):
            headers = None
            if referrer:
                headers = {'Referer': referrer}
            rv = client.get('/url', headers=headers)
            return json.loads(rv.data.decode())['safe']

        assert not is_safe()
        assert not is_safe('')
        assert not is_safe('/')
        assert not is_safe('abc')
        assert not is_safe('/abc')
        assert not is_safe('/url')
        assert not is_safe('http://example.com')
        assert not is_safe('http://example.com/abc')
        assert not is_safe('http://localhost:1234/abc')
        assert not is_safe('http://localhost')
        assert not is_safe('ftp://localhost/abc')
        assert not is_safe('http://localhost/url')
        # Only same-host absolute URLs (not pointing back at /url) pass.
        assert is_safe('http://localhost/')
        assert is_safe('http://localhost/abc')
def test_is_safe_url_relative():
    """is_safe_url(url, True) must only accept host-relative paths."""
    app = flask.Flask(__name__)
    app.debug = True

    @app.route('/url')
    def home():
        safe = False
        # NOTE(review): local `next` shadows the builtin of the same name.
        next = flask.request.args.get('next')
        if next:
            safe = frost.util.is_safe_url(next, True)
        return flask.jsonify({'safe': safe})

    with app.test_client() as client:
        # Helper: hit /url with the candidate in ?next= and return the verdict.
        def is_safe(next=None):
            url = '/url'
            if next:
                url += '?next=' + quote(next, safe='')
            rv = client.get(url)
            return json.loads(rv.data.decode('utf-8'))['safe']

        assert not is_safe()
        assert not is_safe('')
        assert not is_safe('abc')
        assert not is_safe('/url')
        assert not is_safe('http://abc')
        assert not is_safe('http://example.com')
        assert not is_safe('http://example.com/abc')
        assert not is_safe('http://localhost:1234/abc')
        assert not is_safe('http://localhost/')
        assert not is_safe('http://localhost')
        assert not is_safe('ftp://localhost/abc')
        assert not is_safe('http://localhost/abc')
        # Only plain relative paths (not pointing back at /url) pass.
        assert is_safe('/')
        assert is_safe('/abc')
def test_random_string():
    """random_string rejects lengths outside its contract and honours valid ones."""
    # Lengths below 4 or not representable are rejected via assert.
    with pytest.raises(AssertionError):
        frost.util.random_string(1)
    with pytest.raises(AssertionError):
        frost.util.random_string(3)
    with pytest.raises(AssertionError):
        frost.util.random_string(39)
    # Valid lengths return text of exactly the requested size.
    assert isinstance(frost.util.random_string(4), type(u''))
    for length in (4, 8, 40):
        assert len(frost.util.random_string(length)) == length
| 29.328125 | 72 | 0.59723 |
import flask
import frost.util
import json
import pytest
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
def test_check_state():
app = flask.Flask(__name__)
class FakeSessionInterface(flask.sessions.SessionInterface):
def open_session(self, app, request):
return {'csrf': 'somecsrf'}
def save_session(self, app, session, response):
pass
app.session_interface = FakeSessionInterface()
@app.route('/')
@frost.util.check_state
def home():
return 'abc'
with app.test_client() as client:
rv = client.get('/')
assert rv.status_code == 403
rv = client.get('/?state=')
assert rv.status_code == 403
rv = client.get('/?state=fake')
assert rv.status_code == 403
rv = client.get('/?state=somecsrf')
assert rv.status_code == 200
def test_is_safe_url_absolute():
app = flask.Flask(__name__)
@app.route('/url')
def home():
safe = False
if flask.request.referrer:
safe = frost.util.is_safe_url(flask.request.referrer, False)
return flask.jsonify({'safe': safe})
with app.test_client() as client:
def is_safe(referrer=None):
headers = None
if referrer:
headers = {'Referer': referrer}
rv = client.get('/url', headers=headers)
return json.loads(rv.data.decode())['safe']
assert not is_safe()
assert not is_safe('')
assert not is_safe('/')
assert not is_safe('abc')
assert not is_safe('/abc')
assert not is_safe('/url')
assert not is_safe('http://example.com')
assert not is_safe('http://example.com/abc')
assert not is_safe('http://localhost:1234/abc')
assert not is_safe('http://localhost')
assert not is_safe('ftp://localhost/abc')
assert not is_safe('http://localhost/url')
assert is_safe('http://localhost/')
assert is_safe('http://localhost/abc')
def test_is_safe_url_relative():
app = flask.Flask(__name__)
app.debug = True
@app.route('/url')
def home():
safe = False
next = flask.request.args.get('next')
if next:
safe = frost.util.is_safe_url(next, True)
return flask.jsonify({'safe': safe})
with app.test_client() as client:
def is_safe(next=None):
url = '/url'
if next:
url += '?next=' + quote(next, safe='')
rv = client.get(url)
return json.loads(rv.data.decode('utf-8'))['safe']
assert not is_safe()
assert not is_safe('')
assert not is_safe('abc')
assert not is_safe('/url')
assert not is_safe('http://abc')
assert not is_safe('http://example.com')
assert not is_safe('http://example.com/abc')
assert not is_safe('http://localhost:1234/abc')
assert not is_safe('http://localhost/')
assert not is_safe('http://localhost')
assert not is_safe('ftp://localhost/abc')
assert not is_safe('http://localhost/abc')
assert is_safe('/')
assert is_safe('/abc')
def test_random_string():
with pytest.raises(AssertionError):
frost.util.random_string(1)
with pytest.raises(AssertionError):
frost.util.random_string(3)
with pytest.raises(AssertionError):
frost.util.random_string(39)
assert isinstance(frost.util.random_string(4), type(u''))
assert len(frost.util.random_string(4)) == 4
assert len(frost.util.random_string(8)) == 8
assert len(frost.util.random_string(40)) == 40
| true | true |
1c3cc7e5fcb7f496a7ac37b7f6308f018dab22bc | 937 | py | Python | core/mailer.py | Foohx/acceslibre | 55135e096f2ec4e413ff991f01c17f5e0d5925c0 | [
"MIT"
] | 8 | 2020-07-23T08:17:28.000Z | 2022-03-09T22:31:36.000Z | core/mailer.py | Foohx/acceslibre | 55135e096f2ec4e413ff991f01c17f5e0d5925c0 | [
"MIT"
] | 37 | 2020-07-01T08:47:33.000Z | 2022-02-03T19:50:58.000Z | core/mailer.py | Foohx/acceslibre | 55135e096f2ec4e413ff991f01c17f5e0d5925c0 | [
"MIT"
] | 4 | 2021-04-08T10:57:18.000Z | 2022-01-31T13:16:31.000Z | from django.conf import settings
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
def send_email(
    to_list, subject, template, context=None, reply_to=None, fail_silently=True
):
    """Render *template* with *context* and email it to *to_list*.

    Args:
        to_list: list of recipient addresses.
        subject: email subject line.
        template: template name passed to render_to_string().
        context: optional template context; site-wide keys are layered on top.
        reply_to: optional reply-to address (defaults to the sender).
        fail_silently: forwarded to EmailMessage.send().

    Returns:
        True if exactly one message was delivered, False otherwise.
    """
    # Copy so the site-wide keys added below never leak into the caller's
    # dict (the previous version mutated the `context` argument in place).
    context = dict(context) if context else {}
    context["SITE_NAME"] = settings.SITE_NAME
    context["SITE_ROOT_URL"] = settings.SITE_ROOT_URL
    email = EmailMessage(
        subject=subject,
        body=render_to_string(template, context),
        from_email=settings.DEFAULT_FROM_EMAIL,
        to=to_list,
        reply_to=[reply_to] if reply_to else [settings.DEFAULT_FROM_EMAIL],
    )
    # Note: The return value will be the number of successfully delivered messages
    # (which can be 0 or 1 since send_mail can only send one message).
    return 1 == email.send(fail_silently=fail_silently)
def mail_admins(*args, **kwargs):
    """Shortcut: send_email() addressed to settings.DEFAULT_FROM_EMAIL."""
    admin_recipients = [settings.DEFAULT_FROM_EMAIL]
    return send_email(admin_recipients, *args, **kwargs)
| 34.703704 | 82 | 0.729989 | from django.conf import settings
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
def send_email(
to_list, subject, template, context=None, reply_to=None, fail_silently=True
):
context = context if context else {}
context["SITE_NAME"] = settings.SITE_NAME
context["SITE_ROOT_URL"] = settings.SITE_ROOT_URL
email = EmailMessage(
subject=subject,
body=render_to_string(template, context),
from_email=settings.DEFAULT_FROM_EMAIL,
to=to_list,
reply_to=[reply_to] if reply_to else [settings.DEFAULT_FROM_EMAIL],
)
return 1 == email.send(fail_silently=fail_silently)
def mail_admins(*args, **kwargs):
return send_email([settings.DEFAULT_FROM_EMAIL], *args, **kwargs)
| true | true |
1c3cc85dc8d56700337ab06f29b625d32838a7be | 4,773 | py | Python | net_per_dev.py | erthalion/postgres-bcc | 6c18e8cf795acde2479d536304cdae720b14d8c6 | [
"Apache-2.0"
] | 37 | 2019-02-27T12:18:15.000Z | 2022-03-28T07:18:42.000Z | net_per_dev.py | erthalion/postgres-bcc | 6c18e8cf795acde2479d536304cdae720b14d8c6 | [
"Apache-2.0"
] | 1 | 2019-12-10T09:37:26.000Z | 2019-12-23T11:22:41.000Z | net_per_dev.py | erthalion/postgres-bcc | 6c18e8cf795acde2479d536304cdae720b14d8c6 | [
"Apache-2.0"
] | 1 | 2019-12-07T01:50:10.000Z | 2019-12-07T01:50:10.000Z | #!/usr/bin/env python
#
# net_per_dev Track how much data was transmitted per network device
#
# usage: net_per_dev [-d]
from __future__ import print_function
from time import sleep
import argparse
import ctypes as ct
import signal
from bcc import BPF
import utils
bpf_text = """
#include <linux/ptrace.h>
struct key_t {
char device[10];
};
struct net_data {
u32 pid;
u32 __padding;
unsigned int len;
char device[10];
};
#define IFNAMSIZ 16
struct net_device {
char name[10];
};
struct sk_buff {
union {
struct {
/* These two members must be first. */
struct sk_buff *next;
struct sk_buff *prev;
union {
struct net_device *dev;
/* Some protocols might use this space to store information,
* while device pointer would be NULL.
* UDP receive path is one user.
*/
unsigned long dev_scratch;
};
};
struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
struct list_head list;
};
union {
struct sock *sk;
int ip_defrag_offset;
};
union {
ktime_t tstamp;
u64 skb_mstamp_ns; /* earliest departure time */
};
/*
* This is the control buffer. It is free to use for every
* layer. Please put your private variables there. If you
* want to keep them across layers you have to do a skb_clone()
* first. This is owned by whoever has the skb queued ATM.
*/
char cb[48] __aligned(8);
union {
struct {
unsigned long _skb_refdst;
void (*destructor)(struct sk_buff *skb);
};
struct list_head tcp_tsorted_anchor;
};
struct sec_path *sp;
unsigned long _nfct;
struct nf_bridge_info *nf_bridge;
unsigned int len,
data_len;
__u16 mac_len,
hdr_len;
};
BPF_PERF_OUTPUT(events);
BPF_HASH(net_data_hash, struct key_t);
int probe_dev_hard_start_xmit(struct pt_regs *ctx)
{
u32 pid = bpf_get_current_pid_tgid();
struct sk_buff buff = {};
struct net_device device = {};
struct key_t key = {};
bpf_probe_read(&buff,
sizeof(buff),
((struct sk_buff *)PT_REGS_PARM1(ctx)));
bpf_probe_read(&device,
sizeof(device),
((struct net_device *)PT_REGS_PARM2(ctx)));
struct net_data data = {};
data.pid = pid;
data.len = buff.len;
bpf_probe_read(&data.device,
IFNAMSIZ,
device.name);
bpf_probe_read(&key.device,
IFNAMSIZ,
device.name);
u64 zero = 0, *val;
val = net_data_hash.lookup_or_init(&key, &zero);
(*val) += buff.len;
events.perf_submit(ctx, &data, sizeof(data));
return 0;
}
"""
def attach(bpf):
    """Hook the kernel's dev_hard_start_xmit() with our BPF probe function."""
    bpf.attach_kprobe(event="dev_hard_start_xmit",
                      fn_name="probe_dev_hard_start_xmit")
# signal handler
def signal_ignore(sig, frame):
    """Signal handler that swallows the signal, emitting only a newline.

    Installed on SIGINT during shutdown so repeated Ctrl-C presses do not
    interrupt cleanup.
    """
    print("")
class Data(ct.Structure):
    # Python-side mirror of `struct net_data` emitted by the BPF program
    # through the perf buffer.
    # NOTE(review): `pid` is declared c_ulong (8 bytes on LP64 platforms),
    # which appears to absorb both the u32 `pid` and the u32 `__padding`
    # members of the C struct -- confirm this matches the target ABI.
    _fields_ = [("pid", ct.c_ulong),
                ("len", ct.c_uint),
                ("device", ct.c_char * 10)]
def print_event(cpu, data, size):
    """Perf-buffer callback: decode one net_data record and print it.

    :param cpu: CPU the sample was taken on (unused).
    :param data: raw pointer to the perf event payload.
    :param size: payload size in bytes (unused).
    """
    record = ct.cast(data, ct.POINTER(Data)).contents
    pid, device, length = record.pid, record.device, record.len
    print("Event: pid {} device {} len {}".format(pid, device, length))
def run(args):
    """Compile and attach the BPF program, poll until Ctrl-C, then print
    the per-device byte totals accumulated in the kernel-side hash map.

    :param args: parsed argparse namespace with a boolean ``debug`` flag.
    """
    print("Attaching...")
    # debug=4 enables extra bcc compile-time debug output when --debug is
    # set -- presumably one of the BPF.DEBUG_* flags; confirm against bcc docs.
    debug = 4 if args.debug else 0
    bpf = BPF(text=bpf_text, debug=debug)
    attach(bpf)
    exiting = False
    if args.debug:
        # In debug mode also stream individual events to stdout.
        bpf["events"].open_perf_buffer(print_event)
    print("Listening...")
    while True:
        try:
            sleep(1)
            if args.debug:
                bpf.perf_buffer_poll()
        except KeyboardInterrupt:
            exiting = True
            # as cleanup can take many seconds, trap Ctrl-C:
            signal.signal(signal.SIGINT, signal_ignore)
        if exiting:
            print()
            print("Detaching...")
            print()
            break
    print("Total")
    # Dump the per-device byte counters maintained by the BPF program.
    for (k, v) in bpf.get_table("net_data_hash").items():
        print('{}: {}'.format(k.device.decode("ascii"), utils.size(v.value)))
def parse_args(argv=None):
    """Parse command-line options for the network tracer.

    :param argv: optional list of argument strings; defaults to
        ``sys.argv[1:]`` when None (the extra parameter is backward
        compatible and makes the function unit-testable).
    :return: argparse.Namespace with a boolean ``debug`` attribute.
    """
    parser = argparse.ArgumentParser(
        # Fixed typo: "netword" -> "network".
        description="Track how much data was transmitted per network device",
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        "-d", "--debug", action='store_true', default=False,
        help="debug mode")
    return parser.parse_args(argv)
if __name__ == "__main__":
run(parse_args())
| 23.865 | 79 | 0.562958 |
from __future__ import print_function
from time import sleep
import argparse
import ctypes as ct
import signal
from bcc import BPF
import utils
bpf_text = """
#include <linux/ptrace.h>
struct key_t {
char device[10];
};
struct net_data {
u32 pid;
u32 __padding;
unsigned int len;
char device[10];
};
#define IFNAMSIZ 16
struct net_device {
char name[10];
};
struct sk_buff {
union {
struct {
/* These two members must be first. */
struct sk_buff *next;
struct sk_buff *prev;
union {
struct net_device *dev;
/* Some protocols might use this space to store information,
* while device pointer would be NULL.
* UDP receive path is one user.
*/
unsigned long dev_scratch;
};
};
struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
struct list_head list;
};
union {
struct sock *sk;
int ip_defrag_offset;
};
union {
ktime_t tstamp;
u64 skb_mstamp_ns; /* earliest departure time */
};
/*
* This is the control buffer. It is free to use for every
* layer. Please put your private variables there. If you
* want to keep them across layers you have to do a skb_clone()
* first. This is owned by whoever has the skb queued ATM.
*/
char cb[48] __aligned(8);
union {
struct {
unsigned long _skb_refdst;
void (*destructor)(struct sk_buff *skb);
};
struct list_head tcp_tsorted_anchor;
};
struct sec_path *sp;
unsigned long _nfct;
struct nf_bridge_info *nf_bridge;
unsigned int len,
data_len;
__u16 mac_len,
hdr_len;
};
BPF_PERF_OUTPUT(events);
BPF_HASH(net_data_hash, struct key_t);
int probe_dev_hard_start_xmit(struct pt_regs *ctx)
{
u32 pid = bpf_get_current_pid_tgid();
struct sk_buff buff = {};
struct net_device device = {};
struct key_t key = {};
bpf_probe_read(&buff,
sizeof(buff),
((struct sk_buff *)PT_REGS_PARM1(ctx)));
bpf_probe_read(&device,
sizeof(device),
((struct net_device *)PT_REGS_PARM2(ctx)));
struct net_data data = {};
data.pid = pid;
data.len = buff.len;
bpf_probe_read(&data.device,
IFNAMSIZ,
device.name);
bpf_probe_read(&key.device,
IFNAMSIZ,
device.name);
u64 zero = 0, *val;
val = net_data_hash.lookup_or_init(&key, &zero);
(*val) += buff.len;
events.perf_submit(ctx, &data, sizeof(data));
return 0;
}
"""
def attach(bpf):
bpf.attach_kprobe(
event="dev_hard_start_xmit",
fn_name="probe_dev_hard_start_xmit")
def signal_ignore(sig, frame):
print()
class Data(ct.Structure):
_fields_ = [("pid", ct.c_ulong),
("len", ct.c_uint),
("device", ct.c_char * 10)]
def print_event(cpu, data, size):
event = ct.cast(data, ct.POINTER(Data)).contents
print("Event: pid {} device {} len {}".format(
event.pid, event.device, event.len))
def run(args):
print("Attaching...")
debug = 4 if args.debug else 0
bpf = BPF(text=bpf_text, debug=debug)
attach(bpf)
exiting = False
if args.debug:
bpf["events"].open_perf_buffer(print_event)
print("Listening...")
while True:
try:
sleep(1)
if args.debug:
bpf.perf_buffer_poll()
except KeyboardInterrupt:
exiting = True
signal.signal(signal.SIGINT, signal_ignore)
if exiting:
print()
print("Detaching...")
print()
break
print("Total")
for (k, v) in bpf.get_table("net_data_hash").items():
print('{}: {}'.format(k.device.decode("ascii"), utils.size(v.value)))
def parse_args():
parser = argparse.ArgumentParser(
description="Track how much data was transmitted per netword device",
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
"-d", "--debug", action='store_true', default=False,
help="debug mode")
return parser.parse_args()
if __name__ == "__main__":
run(parse_args())
| true | true |
1c3cc9334380dc39afcaf35db9b9f3acdc612122 | 6,447 | py | Python | homeassistant/components/arlo/sensor.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | [
"Apache-2.0"
] | 1 | 2019-02-18T03:16:32.000Z | 2019-02-18T03:16:32.000Z | homeassistant/components/arlo/sensor.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | [
"Apache-2.0"
] | 3 | 2021-09-08T03:29:36.000Z | 2022-03-12T00:59:48.000Z | homeassistant/components/arlo/sensor.py | shanbs/home-assistant | 818776d2b4f11e4f51992dc88bc0a6f9055833b2 | [
"Apache-2.0"
] | 1 | 2019-09-28T07:06:08.000Z | 2019-09-28T07:06:08.000Z | """Sensor support for Netgear Arlo IP cameras."""
import logging
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.components.arlo import (
ATTRIBUTION, DEFAULT_BRAND, DATA_ARLO, SIGNAL_UPDATE_ARLO)
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_MONITORED_CONDITIONS, TEMP_CELSIUS,
DEVICE_CLASS_TEMPERATURE, DEVICE_CLASS_HUMIDITY)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['arlo']
# sensor_type [ description, unit, icon ]
SENSOR_TYPES = {
'last_capture': ['Last', None, 'run-fast'],
'total_cameras': ['Arlo Cameras', None, 'video'],
'captured_today': ['Captured Today', None, 'file-video'],
'battery_level': ['Battery Level', '%', 'battery-50'],
'signal_strength': ['Signal Strength', None, 'signal'],
'temperature': ['Temperature', TEMP_CELSIUS, 'thermometer'],
'humidity': ['Humidity', '%', 'water-percent'],
'air_quality': ['Air Quality', 'ppm', 'biohazard']
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up an Arlo IP sensor.

    Builds one ArloSensor per monitored condition: a single hub-level
    sensor for 'total_cameras', per-camera sensors for the other types,
    and environment sensors (temperature/humidity/air quality) only for
    base stations reporting model 'ABC1000'.
    """
    arlo = hass.data.get(DATA_ARLO)
    if not arlo:
        # Arlo component not initialised; nothing to set up.
        return

    sensors = []
    for sensor_type in config.get(CONF_MONITORED_CONDITIONS):
        if sensor_type == 'total_cameras':
            # Hub-wide counter backed by the arlo object itself.
            sensors.append(ArloSensor(
                SENSOR_TYPES[sensor_type][0], arlo, sensor_type))
        else:
            for camera in arlo.cameras:
                # Environment readings come from base stations, not cameras.
                if sensor_type in ('temperature', 'humidity', 'air_quality'):
                    continue
                name = '{0} {1}'.format(
                    SENSOR_TYPES[sensor_type][0], camera.name)
                sensors.append(ArloSensor(name, camera, sensor_type))

            for base_station in arlo.base_stations:
                if sensor_type in ('temperature', 'humidity', 'air_quality') \
                        and base_station.model_id == 'ABC1000':
                    name = '{0} {1}'.format(
                        SENSOR_TYPES[sensor_type][0], base_station.name)
                    sensors.append(ArloSensor(name, base_station, sensor_type))

    add_entities(sensors, True)
class ArloSensor(Entity):
    """An implementation of a Netgear Arlo IP sensor.

    Depending on ``sensor_type``, ``self._data`` is either the hub-level
    arlo object ('total_cameras') or a camera/base-station object.
    """

    def __init__(self, name, device, sensor_type):
        """Initialize an Arlo sensor."""
        _LOGGER.debug('ArloSensor created for %s', name)
        self._name = name
        # Camera, base station, or the top-level arlo hub object.
        self._data = device
        self._sensor_type = sensor_type
        self._state = None
        # Index 2 of SENSOR_TYPES holds the mdi icon suffix.
        self._icon = 'mdi:{}'.format(SENSOR_TYPES.get(self._sensor_type)[2])

    @property
    def name(self):
        """Return the name of this camera."""
        return self._name

    async def async_added_to_hass(self):
        """Register callbacks."""
        # Refresh this entity whenever the Arlo hub signals new data.
        async_dispatcher_connect(
            self.hass, SIGNAL_UPDATE_ARLO, self._update_callback)

    @callback
    def _update_callback(self):
        """Call update method."""
        self.async_schedule_update_ha_state(True)

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        # Battery sensors get a dynamic icon reflecting the charge level.
        if self._sensor_type == 'battery_level' and self._state is not None:
            return icon_for_battery_level(battery_level=int(self._state),
                                          charging=False)
        return self._icon

    @property
    def unit_of_measurement(self):
        """Return the units of measurement."""
        # Index 1 of SENSOR_TYPES holds the unit (may be None).
        return SENSOR_TYPES.get(self._sensor_type)[1]

    @property
    def device_class(self):
        """Return the device class of the sensor."""
        if self._sensor_type == 'temperature':
            return DEVICE_CLASS_TEMPERATURE
        if self._sensor_type == 'humidity':
            return DEVICE_CLASS_HUMIDITY
        return None

    def update(self):
        """Get the latest data and updates the state."""
        _LOGGER.debug("Updating Arlo sensor %s", self.name)
        if self._sensor_type == 'total_cameras':
            self._state = len(self._data.cameras)

        elif self._sensor_type == 'captured_today':
            self._state = len(self._data.captured_today)

        elif self._sensor_type == 'last_capture':
            try:
                video = self._data.last_video
                self._state = video.created_at_pretty("%m-%d-%Y %H:%M:%S")
            except (AttributeError, IndexError):
                # No video available within the library's cache window.
                error_msg = \
                    'Video not found for {0}. Older than {1} days?'.format(
                        self.name, self._data.min_days_vdo_cache)
                _LOGGER.debug(error_msg)
                self._state = None

        # The remaining branches read optional device attributes; a device
        # that lacks the reading raises TypeError, mapped to state None.
        elif self._sensor_type == 'battery_level':
            try:
                self._state = self._data.battery_level
            except TypeError:
                self._state = None

        elif self._sensor_type == 'signal_strength':
            try:
                self._state = self._data.signal_strength
            except TypeError:
                self._state = None

        elif self._sensor_type == 'temperature':
            try:
                self._state = self._data.ambient_temperature
            except TypeError:
                self._state = None

        elif self._sensor_type == 'humidity':
            try:
                self._state = self._data.ambient_humidity
            except TypeError:
                self._state = None

        elif self._sensor_type == 'air_quality':
            try:
                self._state = self._data.ambient_air_quality
            except TypeError:
                self._state = None

    @property
    def device_state_attributes(self):
        """Return the device state attributes."""
        attrs = {}

        attrs[ATTR_ATTRIBUTION] = ATTRIBUTION
        attrs['brand'] = DEFAULT_BRAND

        # The hub-level counter has no single backing device/model.
        if self._sensor_type != 'total_cameras':
            attrs['model'] = self._data.model_id

        return attrs
| 34.475936 | 79 | 0.612843 | import logging
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.components.arlo import (
ATTRIBUTION, DEFAULT_BRAND, DATA_ARLO, SIGNAL_UPDATE_ARLO)
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_MONITORED_CONDITIONS, TEMP_CELSIUS,
DEVICE_CLASS_TEMPERATURE, DEVICE_CLASS_HUMIDITY)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['arlo']
SENSOR_TYPES = {
'last_capture': ['Last', None, 'run-fast'],
'total_cameras': ['Arlo Cameras', None, 'video'],
'captured_today': ['Captured Today', None, 'file-video'],
'battery_level': ['Battery Level', '%', 'battery-50'],
'signal_strength': ['Signal Strength', None, 'signal'],
'temperature': ['Temperature', TEMP_CELSIUS, 'thermometer'],
'humidity': ['Humidity', '%', 'water-percent'],
'air_quality': ['Air Quality', 'ppm', 'biohazard']
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
arlo = hass.data.get(DATA_ARLO)
if not arlo:
return
sensors = []
for sensor_type in config.get(CONF_MONITORED_CONDITIONS):
if sensor_type == 'total_cameras':
sensors.append(ArloSensor(
SENSOR_TYPES[sensor_type][0], arlo, sensor_type))
else:
for camera in arlo.cameras:
if sensor_type in ('temperature', 'humidity', 'air_quality'):
continue
name = '{0} {1}'.format(
SENSOR_TYPES[sensor_type][0], camera.name)
sensors.append(ArloSensor(name, camera, sensor_type))
for base_station in arlo.base_stations:
if sensor_type in ('temperature', 'humidity', 'air_quality') \
and base_station.model_id == 'ABC1000':
name = '{0} {1}'.format(
SENSOR_TYPES[sensor_type][0], base_station.name)
sensors.append(ArloSensor(name, base_station, sensor_type))
add_entities(sensors, True)
class ArloSensor(Entity):
def __init__(self, name, device, sensor_type):
_LOGGER.debug('ArloSensor created for %s', name)
self._name = name
self._data = device
self._sensor_type = sensor_type
self._state = None
self._icon = 'mdi:{}'.format(SENSOR_TYPES.get(self._sensor_type)[2])
@property
def name(self):
return self._name
async def async_added_to_hass(self):
async_dispatcher_connect(
self.hass, SIGNAL_UPDATE_ARLO, self._update_callback)
@callback
def _update_callback(self):
self.async_schedule_update_ha_state(True)
@property
def state(self):
return self._state
@property
def icon(self):
if self._sensor_type == 'battery_level' and self._state is not None:
return icon_for_battery_level(battery_level=int(self._state),
charging=False)
return self._icon
@property
def unit_of_measurement(self):
return SENSOR_TYPES.get(self._sensor_type)[1]
@property
def device_class(self):
if self._sensor_type == 'temperature':
return DEVICE_CLASS_TEMPERATURE
if self._sensor_type == 'humidity':
return DEVICE_CLASS_HUMIDITY
return None
def update(self):
_LOGGER.debug("Updating Arlo sensor %s", self.name)
if self._sensor_type == 'total_cameras':
self._state = len(self._data.cameras)
elif self._sensor_type == 'captured_today':
self._state = len(self._data.captured_today)
elif self._sensor_type == 'last_capture':
try:
video = self._data.last_video
self._state = video.created_at_pretty("%m-%d-%Y %H:%M:%S")
except (AttributeError, IndexError):
error_msg = \
'Video not found for {0}. Older than {1} days?'.format(
self.name, self._data.min_days_vdo_cache)
_LOGGER.debug(error_msg)
self._state = None
elif self._sensor_type == 'battery_level':
try:
self._state = self._data.battery_level
except TypeError:
self._state = None
elif self._sensor_type == 'signal_strength':
try:
self._state = self._data.signal_strength
except TypeError:
self._state = None
elif self._sensor_type == 'temperature':
try:
self._state = self._data.ambient_temperature
except TypeError:
self._state = None
elif self._sensor_type == 'humidity':
try:
self._state = self._data.ambient_humidity
except TypeError:
self._state = None
elif self._sensor_type == 'air_quality':
try:
self._state = self._data.ambient_air_quality
except TypeError:
self._state = None
@property
def device_state_attributes(self):
attrs = {}
attrs[ATTR_ATTRIBUTION] = ATTRIBUTION
attrs['brand'] = DEFAULT_BRAND
if self._sensor_type != 'total_cameras':
attrs['model'] = self._data.model_id
return attrs
| true | true |
1c3cc9ba10752b82472e53f9b3a4a4c1b0f3a11f | 1,851 | py | Python | applications/FluidTransportApplication/python_scripts/apply_vector_constraint_function_process.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 778 | 2017-01-27T16:29:17.000Z | 2022-03-30T03:01:51.000Z | applications/FluidTransportApplication/python_scripts/apply_vector_constraint_function_process.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 6,634 | 2017-01-15T22:56:13.000Z | 2022-03-31T15:03:36.000Z | applications/FluidTransportApplication/python_scripts/apply_vector_constraint_function_process.py | lkusch/Kratos | e8072d8e24ab6f312765185b19d439f01ab7b27b | [
"BSD-4-Clause"
] | 224 | 2017-02-07T14:12:49.000Z | 2022-03-06T23:09:34.000Z | import KratosMultiphysics
import KratosMultiphysics.FluidTransportApplication as KratosFluidTransport
import math
def Factory(settings, Model):
    """Kratos entry point: validate *settings* and build the process.

    :param settings: KratosMultiphysics.Parameters wrapping the json input.
    :param Model: container of model parts addressed by name.
    """
    if type(settings) != KratosMultiphysics.Parameters:
        raise Exception("expected input shall be a Parameters object, encapsulating a json string")
    parameters = settings["Parameters"]
    return ApplyVectorConstraintFunctionProcess(Model, parameters)
## All the python processes should be derived from "python_process"
class ApplyVectorConstraintFunctionProcess(KratosMultiphysics.Process):
    """Assigns an analytic VELOCITY field to the nodes of a model part.

    Each entry of settings["active"] toggles one velocity component
    (X, Y, Z).  The active formulas look like a prescribed rotational
    field -- TODO confirm the intended physics with the case setup.
    """

    def __init__(self, Model, settings ):
        KratosMultiphysics.Process.__init__(self)

        model_part = Model[settings["model_part_name"].GetString()]

        self.components_process_list = []

        if settings["active"][0].GetBool() == True:
            for node in model_part.Nodes:
                velocity = 10000*node.Y*(1-node.X*node.X)
                #velocity = 0.8 * (node.Y - 0.5)
                node.SetSolutionStepValue(KratosMultiphysics.VELOCITY_X,velocity)
        if settings["active"][1].GetBool() == True:
            for node in model_part.Nodes:
                velocity = -10000*node.X*(1-node.Y*node.Y)
                #velocity = - 0.8 * (node.X - 0.5)
                node.SetSolutionStepValue(KratosMultiphysics.VELOCITY_Y,velocity)
        if settings["active"][2].GetBool() == True:
            for node in model_part.Nodes:
                # Z component is always zeroed when active (2D-style field).
                velocity = 0.0
                node.SetSolutionStepValue(KratosMultiphysics.VELOCITY_Z,velocity)
# def ExecuteInitialize(self):
# for component in self.components_process_list:
# component.ExecuteInitialize()
# def ExecuteInitializeSolutionStep(self):
# for component in self.components_process_list:
# component.ExecuteInitializeSolutionStep() | 41.133333 | 100 | 0.655321 | import KratosMultiphysics
import KratosMultiphysics.FluidTransportApplication as KratosFluidTransport
import math
def Factory(settings, Model):
if(type(settings) != KratosMultiphysics.Parameters):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return ApplyVectorConstraintFunctionProcess(Model, settings["Parameters"])
ocess):
def __init__(self, Model, settings ):
KratosMultiphysics.Process.__init__(self)
model_part = Model[settings["model_part_name"].GetString()]
self.components_process_list = []
if settings["active"][0].GetBool() == True:
for node in model_part.Nodes:
velocity = 10000*node.Y*(1-node.X*node.X)
node.SetSolutionStepValue(KratosMultiphysics.VELOCITY_X,velocity)
if settings["active"][1].GetBool() == True:
for node in model_part.Nodes:
velocity = -10000*node.X*(1-node.Y*node.Y)
node.SetSolutionStepValue(KratosMultiphysics.VELOCITY_Y,velocity)
if settings["active"][2].GetBool() == True:
for node in model_part.Nodes:
velocity = 0.0
node.SetSolutionStepValue(KratosMultiphysics.VELOCITY_Z,velocity)
| true | true |
1c3cc9c96a9dfde55155b2feb763c25cc4b4ce21 | 2,722 | py | Python | RequestsStampede/policy/retry.py | PatrickMurray/RequestsStampede | 88584d364da6632fe68cd26cc3fdfe40e0dc1f0d | [
"MIT"
] | 11 | 2021-04-18T01:31:33.000Z | 2022-02-14T15:24:42.000Z | RequestsStampede/policy/retry.py | PatrickMurray/RequestsStampede | 88584d364da6632fe68cd26cc3fdfe40e0dc1f0d | [
"MIT"
] | null | null | null | RequestsStampede/policy/retry.py | PatrickMurray/RequestsStampede | 88584d364da6632fe68cd26cc3fdfe40e0dc1f0d | [
"MIT"
] | null | null | null | """
Build-in retry policies and their abstract class.
"""
import abc
import typing
import math
import RequestsStampede.exceptions
class AbstractRetryPolicy(abc.ABC):
    """
    An abstract class for use in implementing retry policies.
    """

    # Number of request attempts the policy allows; concrete subclasses
    # must set this (math.inf denotes unbounded retries).
    attempts: int
class FixedRetryPolicy(AbstractRetryPolicy):
    """
    Retry policy with a fixed, finite number of attempts.
    """

    def __init__(self, attempts: typing.Optional[int] = 5):
        """
        Validate and store the attempt budget.

        :param attempts: The number of request retries to be attempted.
        :type attempts: int
        """
        assert isinstance(attempts, int)
        assert attempts > 0
        self.attempts = attempts

    def __repr__(self):
        return (
            f"<{__class__.__module__}.{__class__.__name__} object at "
            f"{hex(id(self))} attempts={self.attempts}>"
        )
class InfiniteRetryPolicy(AbstractRetryPolicy):
    """
    Retry policy that never stops retrying.
    """

    def __init__(self):
        """
        Initialize with an unbounded attempt budget.
        """
        self.attempts = math.inf

    def __repr__(self):
        return (
            f"<{__class__.__module__}.{__class__.__name__} object at "
            f"{hex(id(self))} attempts={self.attempts}>"
        )
class ConditionalRetryPolicy(AbstractRetryPolicy):
    """
    Establishes a conditional retry policy.

    Not Implemented.
    """

    def __init__(self):
        # Placeholder: constructing this policy is not yet supported.
        raise NotImplementedError
class CustomRetryPolicy(AbstractRetryPolicy):
    """
    Establishes a custom file-based retry policy.
    """

    def __init__(self, policy: dict):
        """
        Provided a policy definition, initializes a custom retry policy based
        on the parameters defined therein.

        Example policies:

            {
                "type": "fixed"
                "attempts" 10
            }

            {
                "type": "infinite"
            }

        :param policy: A retry policy definition.
        :type policy: dict
        :raises RequestsStampede.exceptions.InvalidCustomRetryPolicy:
            if the policy type is missing or unrecognized.
        """
        # Guard against an absent/None "type" key: the previous code called
        # .lower() directly and crashed with AttributeError instead of
        # raising the intended InvalidCustomRetryPolicy.
        policy_type = str(policy.get("type") or "").lower()

        if policy_type == "fixed":
            attempts = policy.get("attempts")
            assert isinstance(attempts, int)
            assert attempts > 0
            self.attempts = attempts
        elif policy_type == "infinite":
            self.attempts = math.inf
        else:
            raise RequestsStampede.exceptions.InvalidCustomRetryPolicy(policy_type)

    def __repr__(self):
        return "<{}.{} object at {} attempts={}>".format(
            __class__.__module__,
            __class__.__name__,
            hex(id(self)),
            self.attempts,
        )
| 21.603175 | 83 | 0.573475 |
import abc
import typing
import math
import RequestsStampede.exceptions
class AbstractRetryPolicy(abc.ABC):
attempts: int
class FixedRetryPolicy(AbstractRetryPolicy):
def __init__(self, attempts: typing.Optional[int] = 5):
assert isinstance(attempts, int)
assert attempts > 0
self.attempts = attempts
def __repr__(self):
return "<{}.{} object at {} attempts={}>".format(
__class__.__module__,
__class__.__name__,
hex(id(self)),
self.attempts,
)
class InfiniteRetryPolicy(AbstractRetryPolicy):
def __init__(self):
self.attempts = math.inf
def __repr__(self):
return "<{}.{} object at {} attempts={}>".format(
__class__.__module__,
__class__.__name__,
hex(id(self)),
self.attempts,
)
class ConditionalRetryPolicy(AbstractRetryPolicy):
def __init__(self):
raise NotImplementedError
class CustomRetryPolicy(AbstractRetryPolicy):
def __init__(self, policy: dict):
policy_type = policy.get("type").lower()
if policy_type == "fixed":
attempts = policy.get("attempts")
assert isinstance(attempts, int)
assert attempts > 0
self.attempts = attempts
elif policy_type == "infinite":
self.attempts = math.inf
else:
raise RequestsStampede.exceptions.InvalidCustomRetryPolicy(policy_type)
def __repr__(self):
return "<{}.{} object at {} attempts={}>".format(
__class__.__module__,
__class__.__name__,
hex(id(self)),
self.attempts,
)
| true | true |
1c3cca9b906ddba326683edcd803dccce19224de | 1,333 | py | Python | general/migrations/0006_auto_20160605_1640.py | memnonila/art | 10b3ef39023483f522b80269418831855ddc6fef | [
"MIT"
] | null | null | null | general/migrations/0006_auto_20160605_1640.py | memnonila/art | 10b3ef39023483f522b80269418831855ddc6fef | [
"MIT"
] | null | null | null | general/migrations/0006_auto_20160605_1640.py | memnonila/art | 10b3ef39023483f522b80269418831855ddc6fef | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-05 16:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Adds the Message and Room models and links messages to rooms."""

    dependencies = [
        ('general', '0005_items_item_description'),
    ]

    operations = [
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('handle', models.TextField()),
                ('message', models.TextField()),
                # Indexed for efficient chronological queries.
                ('timestamp', models.DateTimeField(db_index=True, default=django.utils.timezone.now)),
            ],
        ),
        migrations.CreateModel(
            name='Room',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField()),
                # URL-safe unique identifier for the room.
                ('label', models.SlugField(unique=True)),
            ],
        ),
        # Added separately so Room exists before the FK is created.
        migrations.AddField(
            model_name='message',
            name='room',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messages', to='general.Room'),
        ),
    ]
| 33.325 | 125 | 0.582896 |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('general', '0005_items_item_description'),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('handle', models.TextField()),
('message', models.TextField()),
('timestamp', models.DateTimeField(db_index=True, default=django.utils.timezone.now)),
],
),
migrations.CreateModel(
name='Room',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.TextField()),
('label', models.SlugField(unique=True)),
],
),
migrations.AddField(
model_name='message',
name='room',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messages', to='general.Room'),
),
]
| true | true |
1c3ccac2eee1f1170e78ca8146d1d98e56ed5d42 | 1,039 | py | Python | setup.py | yaacov/hawkular-client-cli | c08200e875fb123600f59c841d7479d852e4b4c5 | [
"Apache-2.0"
] | 1 | 2016-11-08T10:20:39.000Z | 2016-11-08T10:20:39.000Z | setup.py | yaacov/hawkular-client-cli | c08200e875fb123600f59c841d7479d852e4b4c5 | [
"Apache-2.0"
] | 1 | 2016-12-06T07:19:36.000Z | 2016-12-06T08:17:56.000Z | setup.py | yaacov/hawkular-client-cli | c08200e875fb123600f59c841d7479d852e4b4c5 | [
"Apache-2.0"
] | 1 | 2018-07-11T07:09:01.000Z | 2018-07-11T07:09:01.000Z | from setuptools import setup
# Package version and short description used below by setuptools.
_VERSION = '0.18.3'
_DESCRIPTION = 'Read/Write data to and from a Hawkular metric server.'

setup(name='hawkular-client-cli',
      version=_VERSION,
      description='Hawkular client cli',
      long_description=_DESCRIPTION,
      classifiers=[
          'Development Status :: 4 - Beta',
          'Intended Audience :: Developers',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 3',
          'Topic :: System :: Monitoring',
      ],
      url='http://github.com/yaacov/hawkular-client-cli',
      author='Yaacov Zamir',
      author_email='yzamir@redhat.com',
      license='Apache License 2.0',
      packages=['hawkular_client_cli'],
      install_requires=[
          'future>=0.15.0',
          'python-dateutil>=2.0.0',
          'PyYAML>=3.0',
          'hawkular-client>=0.5.2',
      ],
      entry_points={
          # Installs the `hawkular-cli` console command.
          'console_scripts': ['hawkular-cli=hawkular_client_cli.command_line:main'],
      },
      include_package_data=True,
      zip_safe=False)
| 30.558824 | 82 | 0.631376 | from setuptools import setup
_VERSION = '0.18.3'
_DESCRIPTION = 'Read/Write data to and from a Hawkular metric server.'
setup(name='hawkular-client-cli',
version=_VERSION,
description='Hawkular client cli',
long_description=_DESCRIPTION,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: System :: Monitoring',
],
url='http://github.com/yaacov/hawkular-client-cli',
author='Yaacov Zamir',
author_email='yzamir@redhat.com',
license='Apache License 2.0',
packages=['hawkular_client_cli'],
install_requires=[
'future>=0.15.0',
'python-dateutil>=2.0.0',
'PyYAML>=3.0',
'hawkular-client>=0.5.2',
],
entry_points={
'console_scripts': ['hawkular-cli=hawkular_client_cli.command_line:main'],
},
include_package_data=True,
zip_safe=False)
| true | true |
1c3ccbd92393233434c82933af106a4d50404ef5 | 16,390 | py | Python | mob_suite/mob_typer.py | dorbarker/mob-suite | 5313f31d19cafbbda396fe588f4a11b1d50a6b08 | [
"Apache-2.0"
] | 1 | 2020-10-15T22:22:25.000Z | 2020-10-15T22:22:25.000Z | mob_suite/mob_typer.py | pavlo888/mob-suite | 5313f31d19cafbbda396fe588f4a11b1d50a6b08 | [
"Apache-2.0"
] | null | null | null | mob_suite/mob_typer.py | pavlo888/mob-suite | 5313f31d19cafbbda396fe588f4a11b1d50a6b08 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import logging
import os
import shutil
import sys
from argparse import (ArgumentParser, FileType)
from mob_suite.version import __version__
import mob_suite.mob_init
from mob_suite.blast import BlastRunner
from mob_suite.blast import BlastReader
from mob_suite.wrappers import circlator
from mob_suite.wrappers import mash
from mob_suite.classes.mcl import mcl
from mob_suite.utils import \
fixStart, \
read_fasta_dict, \
write_fasta_dict, \
filter_overlaping_records, \
replicon_blast, \
mob_blast, \
getRepliconContigs, \
fix_fasta_header, \
getMashBestHit, \
calcFastaStats, \
verify_init, \
check_dependencies
LOG_FORMAT = '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
def init_console_logger(lvl):
    """Configure root logging verbosity.

    :param lvl: integer verbosity, 0 (ERROR) through 3 (DEBUG); values
        outside that range are clamped.
    """
    logging_levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
    # Clamp instead of indexing directly: previously lvl > 3 raised
    # IndexError and negative values silently wrapped around the list.
    report_lvl = logging_levels[max(0, min(lvl, len(logging_levels) - 1))]

    logging.basicConfig(format=LOG_FORMAT, level=report_lvl)
def parse_args():
    "Parse the input arguments, use '-h' for help"
    # Databases live beside this module unless -d/--database_directory
    # overrides them.
    default_database_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'databases')
    parser = ArgumentParser(
        description="Mob Suite: Typing and reconstruction of plasmids from draft and complete assemblies version: {}".format(
            __version__))
    parser.add_argument('-o', '--outdir', type=str, required=True, help='Output Directory to put results')
    parser.add_argument('-i', '--infile', type=str, required=True, help='Input assembly fasta file to process')
    parser.add_argument('-n', '--num_threads', type=int, required=False, help='Number of threads to be used', default=1)
    # NOTE(review): the *_evalue options declare type=str but default to a
    # float, so the attribute type differs depending on whether the user
    # supplies a value -- confirm downstream consumers accept both.
    parser.add_argument('--min_rep_evalue', type=str, required=False,
                        help='Minimum evalue threshold for replicon blastn',
                        default=0.00001)
    parser.add_argument('--min_mob_evalue', type=str, required=False,
                        help='Minimum evalue threshold for relaxase tblastn',
                        default=0.00001)
    parser.add_argument('--min_con_evalue', type=str, required=False, help='Minimum evalue threshold for contig blastn',
                        default=0.00001)
    parser.add_argument('--min_ori_evalue', type=str, required=False,
                        help='Minimum evalue threshold for oriT elements blastn',
                        default=0.00001)
    parser.add_argument('--min_mpf_evalue', type=str, required=False,
                        help='Minimum evalue threshold for mpf elements blastn',
                        default=0.00001)
    # Percent-identity and coverage thresholds for each database search.
    parser.add_argument('--min_rep_ident', type=int, required=False, help='Minimum sequence identity for replicons',
                        default=80)
    parser.add_argument('--min_mob_ident', type=int, required=False, help='Minimum sequence identity for relaxases',
                        default=80)
    parser.add_argument('--min_ori_ident', type=int, required=False,
                        help='Minimum sequence identity for oriT elements', default=90)
    parser.add_argument('--min_mpf_ident', type=int, required=False,
                        help='Minimum sequence identity for mpf elements', default=80)
    parser.add_argument('--min_rep_cov', type=int, required=False,
                        help='Minimum percentage coverage of replicon query by input assembly',
                        default=80)
    parser.add_argument('--min_mob_cov', type=int, required=False,
                        help='Minimum percentage coverage of relaxase query by input assembly',
                        default=80)
    parser.add_argument('--min_ori_cov', type=int, required=False,
                        help='Minimum percentage coverage of oriT',
                        default=90)
    parser.add_argument('--min_mpf_cov', type=int, required=False,
                        help='Minimum percentage coverage of mpf',
                        default=80)
    parser.add_argument('--min_overlap', type=int, required=False,
                        help='Minimum overlap of fragments',
                        default=10)
    parser.add_argument('--keep_tmp', required=False,help='Do not delete temporary file directory', action='store_true')
    parser.add_argument('--debug', required=False, help='Show debug information', action='store_true')
    # Per-database path overrides; ignored when -d changes the base
    # directory (see main()).
    parser.add_argument('--plasmid_mash_db', type=str, required=False,
                        help='Companion Mash database of reference database',
                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                             'databases/ncbi_plasmid_full_seqs.fas.msh'))
    parser.add_argument('--plasmid_replicons', type=str, required=False, help='Fasta of plasmid replicons',
                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                             'databases/rep.dna.fas'))
    parser.add_argument('--plasmid_mob', type=str, required=False, help='Fasta of plasmid relaxases',
                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                             'databases/mob.proteins.faa'))
    parser.add_argument('--plasmid_mpf', type=str, required=False, help='Fasta of known plasmid mate-pair proteins',
                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                             'databases/mpf.proteins.faa'))
    parser.add_argument('--plasmid_orit', type=str, required=False, help='Fasta of known plasmid oriT dna sequences',
                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                             'databases/orit.fas'))
    parser.add_argument('-d', '--database_directory',
                        default=default_database_dir,
                        required=False,
                        help='Directory you want to use for your databases. If the databases are not already '
                             'downloaded, they will be downloaded automatically. Defaults to {}. '
                             'If you change this from the default, will override --plasmid_mash_db, '
                             '--plasmid_replicons, --plasmid_mob, --plasmid_mpf, and '
                             '--plasmid_orit'.format(default_database_dir))

    return parser.parse_args()
def determine_mpf_type(hits):
    """Return the most frequent MPF type among the blast hits.

    `hits` maps accession -> MPF type.  Ties resolve to the type first
    encountered during iteration (dict insertion order), matching max()
    over the counting dict.
    """
    counts = {}
    for accession in hits:
        mpf_type = hits[accession]
        counts[mpf_type] = counts.get(mpf_type, 0) + 1
    return max(counts, key=lambda t: counts[t])
def _hits_by_accession(contigs):
    """Flatten blast hits into {accession: type}.

    `contigs` maps contig id -> iterable of 'accession|type' hit strings,
    as returned by getRepliconContigs().
    """
    found = dict()
    for contig_id in contigs:
        for hit in contigs[contig_id]:
            accession, hit_type = hit.split('|')
            found[accession] = hit_type
    return found


def _format_found(found):
    """Return (types, accessions) as comma-joined strings, ('-', '-') when empty."""
    if len(found) > 0:
        return ",".join(list(found.values())), ",".join(list(found.keys()))
    return "-", "-"


def main():
    """Run mob_typer: type a plasmid assembly by replicon, relaxase, MPF and
    oriT markers, look up its nearest reference plasmid by Mash distance and
    write a one-line TSV report to <outdir>/mobtyper_<infile>_report.txt.
    """
    default_database_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'databases')
    args = parse_args()
    if args.debug:
        init_console_logger(3)
    logging.info('Running Mob-typer v. {}'.format(__version__))
    # --- input validation --------------------------------------------------
    if not args.outdir:
        logging.info('Error, no output directory specified, please specify one')
        sys.exit()
    if not args.infile:
        logging.info('Error, no fasta specified, please specify one')
        sys.exit()
    if not os.path.isfile(args.infile):
        logging.info('Error, fasta file does not exist')
        sys.exit()
    if not os.path.isdir(args.outdir):
        os.mkdir(args.outdir, 0o755)
    if not isinstance(args.num_threads, int):
        # argparse already coerces -n to int, so this branch should be dead;
        # kept (log only, no exit) for parity with the original behaviour.
        logging.info('Error number of threads must be an integer, you specified "{}"'.format(args.num_threads))
    database_dir = os.path.abspath(args.database_directory)
    verify_init(logging, database_dir)
    # Script arguments
    input_fasta = args.infile
    out_dir = args.outdir
    num_threads = int(args.num_threads)
    keep_tmp = args.keep_tmp
    # A non-default -d/--database_directory overrides the per-database paths.
    if database_dir == default_database_dir:
        mob_ref = args.plasmid_mob
        mpf_ref = args.plasmid_mpf
        orit_ref = args.plasmid_orit
        mash_db = args.plasmid_mash_db
        replicon_ref = args.plasmid_replicons
    else:
        mob_ref = os.path.join(database_dir, 'mob.proteins.faa')
        mpf_ref = os.path.join(database_dir, 'mpf.proteins.faa')
        orit_ref = os.path.join(database_dir, 'orit.fas')
        mash_db = os.path.join(database_dir, 'ncbi_plasmid_full_seqs.fas.msh')
        replicon_ref = os.path.join(database_dir, 'rep.dna.fas')
    tmp_dir = os.path.join(out_dir, '__tmp')
    file_id = os.path.basename(input_fasta)
    fixed_fasta = os.path.join(tmp_dir, 'fixed.input.fasta')
    replicon_blast_results = os.path.join(tmp_dir, 'replicon_blast_results.txt')
    mob_blast_results = os.path.join(tmp_dir, 'mobtyper_blast_results.txt')
    mpf_blast_results = os.path.join(tmp_dir, 'mpf_blast_results.txt')
    orit_blast_results = os.path.join(tmp_dir, 'orit_blast_results.txt')
    # Drop stale blast outputs left over from a previous run of this out_dir.
    for stale in (mob_blast_results, mpf_blast_results, orit_blast_results, replicon_blast_results):
        if os.path.isfile(stale):
            os.remove(stale)
    report_file = os.path.join(out_dir, 'mobtyper_' + file_id + '_report.txt')
    mash_file = os.path.join(tmp_dir, 'mash_' + file_id + '.txt')
    # --- numeric parameter validation --------------------------------------
    min_rep_ident = float(args.min_rep_ident)
    min_mob_ident = float(args.min_mob_ident)
    min_ori_ident = float(args.min_ori_ident)
    min_mpf_ident = float(args.min_mpf_ident)
    # BUGFIX: min_mpf_ident was previously never validated, and the error
    # message disagreed with the checked bound (60).
    idents = {'min_rep_ident': min_rep_ident, 'min_mob_ident': min_mob_ident,
              'min_ori_ident': min_ori_ident, 'min_mpf_ident': min_mpf_ident}
    for param, value in idents.items():
        if value < 60:
            logging.error("Error: {} is too low, please specify an integer between 60 - 100".format(param))
            sys.exit(-1)
        if value > 100:
            logging.error("Error: {} is too high, please specify an integer between 60 - 100".format(param))
            sys.exit(-1)
    min_rep_cov = float(args.min_rep_cov)
    min_mob_cov = float(args.min_mob_cov)
    min_ori_cov = float(args.min_ori_cov)
    min_mpf_cov = float(args.min_mpf_cov)
    # BUGFIX: the coverage dict used bogus keys ('min_con_cov'/'min_rpp_cov',
    # both aliased to min_ori_cov) and never validated min_mpf_cov.
    covs = {'min_rep_cov': min_rep_cov, 'min_mob_cov': min_mob_cov,
            'min_ori_cov': min_ori_cov, 'min_mpf_cov': min_mpf_cov}
    for param, value in covs.items():
        if value < 60:
            logging.error("Error: {} is too low, please specify an integer between 60 - 100".format(param))
            sys.exit(-1)
        if value > 100:
            logging.error("Error: {} is too high, please specify an integer between 60 - 100".format(param))
            sys.exit(-1)
    min_rep_evalue = float(args.min_rep_evalue)
    min_mob_evalue = float(args.min_mob_evalue)
    min_ori_evalue = float(args.min_ori_evalue)
    min_mpf_evalue = float(args.min_mpf_evalue)
    # BUGFIX: min_mpf_evalue was previously never validated, and the oriT
    # e-value was stored under the misleading key 'min_con_evalue'.
    evalues = {'min_rep_evalue': min_rep_evalue, 'min_mob_evalue': min_mob_evalue,
               'min_ori_evalue': min_ori_evalue, 'min_mpf_evalue': min_mpf_evalue}
    for param, value in evalues.items():
        if value > 1:
            logging.error("Error: {} is too high, please specify an float evalue between 0 to 1".format(param))
            sys.exit(-1)
    check_dependencies(logging)
    # Trigger a database download if any required reference file is missing.
    needed_dbs = [replicon_ref, mob_ref, mash_db, mpf_ref]
    for db in needed_dbs:
        if not os.path.isfile(db):
            logging.info('Warning! Needed database missing "{}"'.format(db))
            mob_suite.mob_init.main()
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir, 0o755)
    fix_fasta_header(input_fasta, fixed_fasta)
    # --- individual marker blasts ------------------------------------------
    logging.info('Running replicon blast on {}'.format(replicon_ref))
    found_replicons = _hits_by_accession(getRepliconContigs(
        replicon_blast(replicon_ref, fixed_fasta, min_rep_ident, min_rep_cov, min_rep_evalue,
                       tmp_dir, replicon_blast_results, num_threads=num_threads)))
    logging.info('Running relaxase blast on {}'.format(mob_ref))
    found_mob = _hits_by_accession(getRepliconContigs(
        mob_blast(mob_ref, fixed_fasta, min_mob_ident, min_mob_cov, min_mob_evalue,
                  tmp_dir, mob_blast_results, num_threads=num_threads)))
    # BUGFIX: this log line previously reported mob_ref instead of mpf_ref.
    logging.info('Running mpf blast on {}'.format(mpf_ref))
    found_mpf = _hits_by_accession(getRepliconContigs(
        mob_blast(mpf_ref, fixed_fasta, min_mpf_ident, min_mpf_cov, min_mpf_evalue,
                  tmp_dir, mpf_blast_results, num_threads=num_threads)))
    # BUGFIX: this log line previously reported replicon_ref instead of orit_ref.
    logging.info('Running orit blast on {}'.format(orit_ref))
    found_orit = _hits_by_accession(getRepliconContigs(
        replicon_blast(orit_ref, fixed_fasta, min_ori_ident, min_ori_cov, min_ori_evalue,
                       tmp_dir, orit_blast_results, num_threads=num_threads)))
    # --- closest reference plasmid by Mash distance ------------------------
    m = mash()
    # Close the handle before read_mash so buffered mash output is flushed to
    # disk (the handle was previously never closed).
    with open(mash_file, 'w') as mashfile_handle:
        m.run_mash(mash_db, fixed_fasta, mashfile_handle)
    mash_results = m.read_mash(mash_file)
    mash_top_hit = getMashBestHit(mash_results)
    # --- report ------------------------------------------------------------
    rep_types, rep_acs = _format_found(found_replicons)
    mob_types, mob_acs = _format_found(found_mob)
    orit_types, orit_acs = _format_found(found_orit)
    if len(found_mpf) > 0:
        mpf_type = determine_mpf_type(found_mpf)
        mpf_acs = ",".join(list(found_mpf.keys()))
    else:
        mpf_type = "-"
        mpf_acs = "-"
    stats = calcFastaStats(fixed_fasta)
    # Mobility call: a relaxase or an oriT alone makes the plasmid
    # mobilizable; a relaxase plus a mate-pair-formation system makes it
    # self-transmissible (conjugative).
    predicted_mobility = 'Non-mobilizable'
    if mob_acs != '-' or orit_acs != '-':
        predicted_mobility = 'Mobilizable'
    if mob_acs != '-' and mpf_acs != '-':
        predicted_mobility = 'Conjugative'
    string = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(
        file_id, stats['num_seq'], stats['size'], stats['gc_content'],
        rep_types, rep_acs, mob_types, mob_acs, mpf_type, mpf_acs,
        orit_types, orit_acs, predicted_mobility,
        mash_top_hit['top_hit'], mash_top_hit['mash_hit_score'], mash_top_hit['clustid'])
    # The report handle was previously opened early and never closed; write
    # header + data row together under a context manager.
    with open(report_file, 'w') as results_fh:
        results_fh.write("file_id\tnum_contigs\ttotal_length\tgc\t" \
                         "rep_type(s)\trep_type_accession(s)\t" \
                         "relaxase_type(s)\trelaxase_type_accession(s)\t" \
                         "mpf_type\tmpf_type_accession(s)\t" \
                         "orit_type(s)\torit_accession(s)\tPredictedMobility\t" \
                         "mash_nearest_neighbor\tmash_neighbor_distance\tmash_neighbor_cluster\n")
        results_fh.write(string)
    if not keep_tmp:
        shutil.rmtree(tmp_dir)
    print("{}".format(string))


# call main function
if __name__ == '__main__':
    main()
| 41.284635 | 137 | 0.612386 |
import logging
import os
import shutil
import sys
from argparse import (ArgumentParser, FileType)
from mob_suite.version import __version__
import mob_suite.mob_init
from mob_suite.blast import BlastRunner
from mob_suite.blast import BlastReader
from mob_suite.wrappers import circlator
from mob_suite.wrappers import mash
from mob_suite.classes.mcl import mcl
from mob_suite.utils import \
fixStart, \
read_fasta_dict, \
write_fasta_dict, \
filter_overlaping_records, \
replicon_blast, \
mob_blast, \
getRepliconContigs, \
fix_fasta_header, \
getMashBestHit, \
calcFastaStats, \
verify_init, \
check_dependencies
LOG_FORMAT = '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
def init_console_logger(lvl):
    """Configure root logging; lvl 0..3 selects ERROR, WARN, INFO or DEBUG."""
    severity = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG][lvl]
    logging.basicConfig(format=LOG_FORMAT, level=severity)
def parse_args():
    """Build and parse the mob_typer command line.

    Returns the argparse Namespace.  Database path options default to the
    'databases' directory next to this module; a non-default
    -d/--database_directory overrides all of them at once (handled in main()).
    """
    default_database_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'databases')
    parser = ArgumentParser(
        description="Mob Suite: Typing and reconstruction of plasmids from draft and complete assemblies version: {}".format(
            __version__))
    parser.add_argument('-o', '--outdir', type=str, required=True, help='Output Directory to put results')
    parser.add_argument('-i', '--infile', type=str, required=True, help='Input assembly fasta file to process')
    parser.add_argument('-n', '--num_threads', type=int, required=False, help='Number of threads to be used', default=1)
    # E-value cutoffs.  BUGFIX: these previously used type=str with float
    # defaults, so the Namespace value type depended on whether the flag was
    # supplied on the command line; type=float keeps it consistent (main()
    # applies float() anyway, so callers are unaffected).
    parser.add_argument('--min_rep_evalue', type=float, required=False,
                        help='Minimum evalue threshold for replicon blastn',
                        default=0.00001)
    parser.add_argument('--min_mob_evalue', type=float, required=False,
                        help='Minimum evalue threshold for relaxase tblastn',
                        default=0.00001)
    parser.add_argument('--min_con_evalue', type=float, required=False, help='Minimum evalue threshold for contig blastn',
                        default=0.00001)
    parser.add_argument('--min_ori_evalue', type=float, required=False,
                        help='Minimum evalue threshold for oriT elements blastn',
                        default=0.00001)
    parser.add_argument('--min_mpf_evalue', type=float, required=False,
                        help='Minimum evalue threshold for mpf elements blastn',
                        default=0.00001)
    # Percent-identity cutoffs per marker class.
    parser.add_argument('--min_rep_ident', type=int, required=False, help='Minimum sequence identity for replicons',
                        default=80)
    parser.add_argument('--min_mob_ident', type=int, required=False, help='Minimum sequence identity for relaxases',
                        default=80)
    parser.add_argument('--min_ori_ident', type=int, required=False,
                        help='Minimum sequence identity for oriT elements', default=90)
    parser.add_argument('--min_mpf_ident', type=int, required=False,
                        help='Minimum sequence identity for mpf elements', default=80)
    # Query-coverage cutoffs per marker class.
    parser.add_argument('--min_rep_cov', type=int, required=False,
                        help='Minimum percentage coverage of replicon query by input assembly',
                        default=80)
    parser.add_argument('--min_mob_cov', type=int, required=False,
                        help='Minimum percentage coverage of relaxase query by input assembly',
                        default=80)
    parser.add_argument('--min_ori_cov', type=int, required=False,
                        help='Minimum percentage coverage of oriT',
                        default=90)
    parser.add_argument('--min_mpf_cov', type=int, required=False,
                        help='Minimum percentage coverage of mpf',
                        default=80)
    parser.add_argument('--min_overlap', type=int, required=False,
                        help='Minimum overlap of fragments',
                        default=10)
    parser.add_argument('--keep_tmp', required=False, help='Do not delete temporary file directory', action='store_true')
    parser.add_argument('--debug', required=False, help='Show debug information', action='store_true')
    # Reference database locations (all default to the bundled databases/).
    parser.add_argument('--plasmid_mash_db', type=str, required=False,
                        help='Companion Mash database of reference database',
                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                             'databases/ncbi_plasmid_full_seqs.fas.msh'))
    parser.add_argument('--plasmid_replicons', type=str, required=False, help='Fasta of plasmid replicons',
                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                             'databases/rep.dna.fas'))
    parser.add_argument('--plasmid_mob', type=str, required=False, help='Fasta of plasmid relaxases',
                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                             'databases/mob.proteins.faa'))
    parser.add_argument('--plasmid_mpf', type=str, required=False, help='Fasta of known plasmid mate-pair proteins',
                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                             'databases/mpf.proteins.faa'))
    parser.add_argument('--plasmid_orit', type=str, required=False, help='Fasta of known plasmid oriT dna sequences',
                        default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                             'databases/orit.fas'))
    parser.add_argument('-d', '--database_directory',
                        default=default_database_dir,
                        required=False,
                        help='Directory you want to use for your databases. If the databases are not already '
                             'downloaded, they will be downloaded automatically. Defaults to {}. '
                             'If you change this from the default, will override --plasmid_mash_db, '
                             '--plasmid_replicons, --plasmid_mob, --plasmid_mpf, and '
                             '--plasmid_orit'.format(default_database_dir))
    return parser.parse_args()
def determine_mpf_type(hits):
    """Return the most frequent MPF type among the blast hits.

    `hits` maps accession -> MPF type; ties resolve to the type first seen
    during iteration (dict insertion order).
    """
    counts = {}
    for accession in hits:
        mpf_type = hits[accession]
        counts[mpf_type] = counts.get(mpf_type, 0) + 1
    return max(counts, key=lambda t: counts[t])
def _hits_by_accession(contigs):
    """Flatten blast hits into {accession: type}.

    `contigs` maps contig id -> iterable of 'accession|type' hit strings,
    as returned by getRepliconContigs().
    """
    found = dict()
    for contig_id in contigs:
        for hit in contigs[contig_id]:
            accession, hit_type = hit.split('|')
            found[accession] = hit_type
    return found


def _format_found(found):
    """Return (types, accessions) as comma-joined strings, ('-', '-') when empty."""
    if len(found) > 0:
        return ",".join(list(found.values())), ",".join(list(found.keys()))
    return "-", "-"


def main():
    """Run mob_typer: type a plasmid assembly by replicon, relaxase, MPF and
    oriT markers, look up its nearest reference plasmid by Mash distance and
    write a one-line TSV report to <outdir>/mobtyper_<infile>_report.txt.
    """
    default_database_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'databases')
    args = parse_args()
    if args.debug:
        init_console_logger(3)
    logging.info('Running Mob-typer v. {}'.format(__version__))
    # --- input validation --------------------------------------------------
    if not args.outdir:
        logging.info('Error, no output directory specified, please specify one')
        sys.exit()
    if not args.infile:
        logging.info('Error, no fasta specified, please specify one')
        sys.exit()
    if not os.path.isfile(args.infile):
        logging.info('Error, fasta file does not exist')
        sys.exit()
    if not os.path.isdir(args.outdir):
        os.mkdir(args.outdir, 0o755)
    if not isinstance(args.num_threads, int):
        # argparse already coerces -n to int, so this branch should be dead;
        # kept (log only, no exit) for parity with the original behaviour.
        logging.info('Error number of threads must be an integer, you specified "{}"'.format(args.num_threads))
    database_dir = os.path.abspath(args.database_directory)
    verify_init(logging, database_dir)
    # Script arguments
    input_fasta = args.infile
    out_dir = args.outdir
    num_threads = int(args.num_threads)
    keep_tmp = args.keep_tmp
    # A non-default -d/--database_directory overrides the per-database paths.
    if database_dir == default_database_dir:
        mob_ref = args.plasmid_mob
        mpf_ref = args.plasmid_mpf
        orit_ref = args.plasmid_orit
        mash_db = args.plasmid_mash_db
        replicon_ref = args.plasmid_replicons
    else:
        mob_ref = os.path.join(database_dir, 'mob.proteins.faa')
        mpf_ref = os.path.join(database_dir, 'mpf.proteins.faa')
        orit_ref = os.path.join(database_dir, 'orit.fas')
        mash_db = os.path.join(database_dir, 'ncbi_plasmid_full_seqs.fas.msh')
        replicon_ref = os.path.join(database_dir, 'rep.dna.fas')
    tmp_dir = os.path.join(out_dir, '__tmp')
    file_id = os.path.basename(input_fasta)
    fixed_fasta = os.path.join(tmp_dir, 'fixed.input.fasta')
    replicon_blast_results = os.path.join(tmp_dir, 'replicon_blast_results.txt')
    mob_blast_results = os.path.join(tmp_dir, 'mobtyper_blast_results.txt')
    mpf_blast_results = os.path.join(tmp_dir, 'mpf_blast_results.txt')
    orit_blast_results = os.path.join(tmp_dir, 'orit_blast_results.txt')
    # Drop stale blast outputs left over from a previous run of this out_dir.
    for stale in (mob_blast_results, mpf_blast_results, orit_blast_results, replicon_blast_results):
        if os.path.isfile(stale):
            os.remove(stale)
    report_file = os.path.join(out_dir, 'mobtyper_' + file_id + '_report.txt')
    mash_file = os.path.join(tmp_dir, 'mash_' + file_id + '.txt')
    # --- numeric parameter validation --------------------------------------
    min_rep_ident = float(args.min_rep_ident)
    min_mob_ident = float(args.min_mob_ident)
    min_ori_ident = float(args.min_ori_ident)
    min_mpf_ident = float(args.min_mpf_ident)
    # BUGFIX: min_mpf_ident was previously never validated, and the error
    # message disagreed with the checked bound (60).
    idents = {'min_rep_ident': min_rep_ident, 'min_mob_ident': min_mob_ident,
              'min_ori_ident': min_ori_ident, 'min_mpf_ident': min_mpf_ident}
    for param, value in idents.items():
        if value < 60:
            logging.error("Error: {} is too low, please specify an integer between 60 - 100".format(param))
            sys.exit(-1)
        if value > 100:
            logging.error("Error: {} is too high, please specify an integer between 60 - 100".format(param))
            sys.exit(-1)
    min_rep_cov = float(args.min_rep_cov)
    min_mob_cov = float(args.min_mob_cov)
    min_ori_cov = float(args.min_ori_cov)
    min_mpf_cov = float(args.min_mpf_cov)
    # BUGFIX: the coverage dict used bogus keys ('min_con_cov'/'min_rpp_cov',
    # both aliased to min_ori_cov) and never validated min_mpf_cov.
    covs = {'min_rep_cov': min_rep_cov, 'min_mob_cov': min_mob_cov,
            'min_ori_cov': min_ori_cov, 'min_mpf_cov': min_mpf_cov}
    for param, value in covs.items():
        if value < 60:
            logging.error("Error: {} is too low, please specify an integer between 60 - 100".format(param))
            sys.exit(-1)
        if value > 100:
            logging.error("Error: {} is too high, please specify an integer between 60 - 100".format(param))
            sys.exit(-1)
    min_rep_evalue = float(args.min_rep_evalue)
    min_mob_evalue = float(args.min_mob_evalue)
    min_ori_evalue = float(args.min_ori_evalue)
    min_mpf_evalue = float(args.min_mpf_evalue)
    # BUGFIX: min_mpf_evalue was previously never validated, and the oriT
    # e-value was stored under the misleading key 'min_con_evalue'.
    evalues = {'min_rep_evalue': min_rep_evalue, 'min_mob_evalue': min_mob_evalue,
               'min_ori_evalue': min_ori_evalue, 'min_mpf_evalue': min_mpf_evalue}
    for param, value in evalues.items():
        if value > 1:
            logging.error("Error: {} is too high, please specify an float evalue between 0 to 1".format(param))
            sys.exit(-1)
    check_dependencies(logging)
    # Trigger a database download if any required reference file is missing.
    needed_dbs = [replicon_ref, mob_ref, mash_db, mpf_ref]
    for db in needed_dbs:
        if not os.path.isfile(db):
            logging.info('Warning! Needed database missing "{}"'.format(db))
            mob_suite.mob_init.main()
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir, 0o755)
    fix_fasta_header(input_fasta, fixed_fasta)
    # --- individual marker blasts ------------------------------------------
    logging.info('Running replicon blast on {}'.format(replicon_ref))
    found_replicons = _hits_by_accession(getRepliconContigs(
        replicon_blast(replicon_ref, fixed_fasta, min_rep_ident, min_rep_cov, min_rep_evalue,
                       tmp_dir, replicon_blast_results, num_threads=num_threads)))
    logging.info('Running relaxase blast on {}'.format(mob_ref))
    found_mob = _hits_by_accession(getRepliconContigs(
        mob_blast(mob_ref, fixed_fasta, min_mob_ident, min_mob_cov, min_mob_evalue,
                  tmp_dir, mob_blast_results, num_threads=num_threads)))
    # BUGFIX: this log line previously reported mob_ref instead of mpf_ref.
    logging.info('Running mpf blast on {}'.format(mpf_ref))
    found_mpf = _hits_by_accession(getRepliconContigs(
        mob_blast(mpf_ref, fixed_fasta, min_mpf_ident, min_mpf_cov, min_mpf_evalue,
                  tmp_dir, mpf_blast_results, num_threads=num_threads)))
    # BUGFIX: this log line previously reported replicon_ref instead of orit_ref.
    logging.info('Running orit blast on {}'.format(orit_ref))
    found_orit = _hits_by_accession(getRepliconContigs(
        replicon_blast(orit_ref, fixed_fasta, min_ori_ident, min_ori_cov, min_ori_evalue,
                       tmp_dir, orit_blast_results, num_threads=num_threads)))
    # --- closest reference plasmid by Mash distance ------------------------
    m = mash()
    # Close the handle before read_mash so buffered mash output is flushed to
    # disk (the handle was previously never closed).
    with open(mash_file, 'w') as mashfile_handle:
        m.run_mash(mash_db, fixed_fasta, mashfile_handle)
    mash_results = m.read_mash(mash_file)
    mash_top_hit = getMashBestHit(mash_results)
    # --- report ------------------------------------------------------------
    rep_types, rep_acs = _format_found(found_replicons)
    mob_types, mob_acs = _format_found(found_mob)
    orit_types, orit_acs = _format_found(found_orit)
    if len(found_mpf) > 0:
        mpf_type = determine_mpf_type(found_mpf)
        mpf_acs = ",".join(list(found_mpf.keys()))
    else:
        mpf_type = "-"
        mpf_acs = "-"
    stats = calcFastaStats(fixed_fasta)
    # Mobility call: a relaxase or an oriT alone makes the plasmid
    # mobilizable; a relaxase plus a mate-pair-formation system makes it
    # self-transmissible (conjugative).
    predicted_mobility = 'Non-mobilizable'
    if mob_acs != '-' or orit_acs != '-':
        predicted_mobility = 'Mobilizable'
    if mob_acs != '-' and mpf_acs != '-':
        predicted_mobility = 'Conjugative'
    string = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(
        file_id, stats['num_seq'], stats['size'], stats['gc_content'],
        rep_types, rep_acs, mob_types, mob_acs, mpf_type, mpf_acs,
        orit_types, orit_acs, predicted_mobility,
        mash_top_hit['top_hit'], mash_top_hit['mash_hit_score'], mash_top_hit['clustid'])
    # The report handle was previously opened early and never closed; write
    # header + data row together under a context manager.
    with open(report_file, 'w') as results_fh:
        results_fh.write("file_id\tnum_contigs\ttotal_length\tgc\t" \
                         "rep_type(s)\trep_type_accession(s)\t" \
                         "relaxase_type(s)\trelaxase_type_accession(s)\t" \
                         "mpf_type\tmpf_type_accession(s)\t" \
                         "orit_type(s)\torit_accession(s)\tPredictedMobility\t" \
                         "mash_nearest_neighbor\tmash_neighbor_distance\tmash_neighbor_cluster\n")
        results_fh.write(string)
    if not keep_tmp:
        shutil.rmtree(tmp_dir)
    print("{}".format(string))


# Entry point.
if __name__ == '__main__':
    main()
| true | true |
1c3ccd6a141bd876593c8ff1adbe21d0e99e5fd5 | 2,717 | py | Python | _V3/step5.py | cermegno/Foodie-Blog | e9d262902a9d2111c3a03ccb4ceb28a4201176aa | [
"MIT"
] | null | null | null | _V3/step5.py | cermegno/Foodie-Blog | e9d262902a9d2111c3a03ccb4ceb28a4201176aa | [
"MIT"
] | null | null | null | _V3/step5.py | cermegno/Foodie-Blog | e9d262902a9d2111c3a03ccb4ceb28a4201176aa | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import re
import boto
import redis
import json
from flask import Flask, render_template, redirect, request, url_for, make_response
from werkzeug import secure_filename
# Pick the Redis backend: when running on Cloud Foundry the platform injects
# VCAP_SERVICES with Rediscloud credentials; otherwise fall back to a local
# default-port Redis instance.
if 'VCAP_SERVICES' in os.environ:
    VCAP_SERVICES = json.loads(os.environ['VCAP_SERVICES'])
    CREDENTIALS = VCAP_SERVICES["rediscloud"][0]["credentials"]
    r = redis.Redis(host=CREDENTIALS["hostname"], port=CREDENTIALS["port"], password=CREDENTIALS["password"])
else:
    r = redis.Redis(host='127.0.0.1', port='6379')
app = Flask(__name__)
@app.route('/')
def mainpage():
    """Landing page: navigation links plus the running calorie total."""
    # Redis returns bytes; decode for display.
    total_calories = str(r.get('caloriecount'), 'utf-8')
    template = """
    <HTML><BODY><h2>Welcome to my Food Blog</h2>
    <a href="/newmeal">Add New Meal</a><br>
    <a href="/dumpmeals">Show Meal Blog</a><br><br>
    Calories so far: <b>{}</b>
    </BODY>
    """
    return template.format(total_calories)
@app.route('/newmeal')
def survey():
    """Serve the meal-entry form."""
    return make_response(render_template('newmeal.html'))
@app.route('/mealthankyou.html', methods=['POST'])
def mealthankyou():
    """Persist a submitted meal to Redis and show a confirmation page.

    Side effects: adds the calories to the 'caloriecount' counter, bumps
    'counter_meal' and stores the form fields in a hash named 'mealNNN'.
    """
    mealdate = request.form['mealdate']
    mealtype = request.form['mealtype']
    calories = request.form['calories']
    description = request.form['description']
    print ("Mealtype is " + mealtype)
    # BUGFIX: the calories value was previously printed twice
    # ("Calories is" / "Calories are"); keep a single debug line.
    print ("Calories is " + calories)
    print ("Description: " + description)
    # Keep the running calorie total and a monotonically increasing meal id.
    r.incrby('caloriecount', int(calories))
    meal_counter = r.incr('counter_meal')
    print ("the meal counter is now: ", meal_counter)
    ## Create a new key with the counter, zero-padded so keys sort
    ## lexicographically (meal001, meal002, ...).
    newmeal = 'meal' + str(meal_counter).zfill(3)
    print (newmeal)
    print ("Storing the meal now")
    # NOTE(review): hmset is deprecated in redis-py >= 3.0 in favour of
    # hset(name, mapping=...); kept as-is to match the client in use.
    r.hmset(newmeal, {'mealdate': mealdate, 'mealtype': mealtype,
                      'calories': calories, 'description': description})
    return """
    <h3> - New entry added to the blog - </h3>
    <a href="/">Back to main menu</a>
    """
@app.route('/dumpmeals')
def dumpmeals():
    """Render every stored meal hash, in sorted key order, as plain HTML."""
    parts = ["<center><h1>Meals to date</h1>",
             "--------------------------<br>"]
    print ("Reading back from Redis")
    for meal_key in sorted(r.keys('meal*')):
        # Each hash field comes back as bytes; decode before display.
        parts.append("Meal Date : " + str(r.hget(meal_key, 'mealdate'), 'utf-8') + "<br>")
        parts.append("Meal Type : " + str(r.hget(meal_key, 'mealtype'), 'utf-8') + "<br>")
        parts.append("Calories : " + str(r.hget(meal_key, 'calories'), 'utf-8') + "<br>")
        parts.append("Description : " + str(r.hget(meal_key, 'description'), 'utf-8') + "<br>")
        parts.append("<hr>")
    return "".join(parts)
# Entry point: listen on all interfaces; Cloud Foundry injects PORT
# (falls back to 5000 for local runs).
if __name__ == "__main__":
    app.run(debug=False, host='0.0.0.0', \
            port=int(os.getenv('PORT', '5000')), threaded=True)
| 30.52809 | 109 | 0.612808 |
import os
import re
import boto
import redis
import json
from flask import Flask, render_template, redirect, request, url_for, make_response
from werkzeug import secure_filename
# Pick the Redis backend: when running on Cloud Foundry the platform injects
# VCAP_SERVICES with Rediscloud credentials; otherwise fall back to a local
# default-port Redis instance.
if 'VCAP_SERVICES' in os.environ:
    VCAP_SERVICES = json.loads(os.environ['VCAP_SERVICES'])
    CREDENTIALS = VCAP_SERVICES["rediscloud"][0]["credentials"]
    r = redis.Redis(host=CREDENTIALS["hostname"], port=CREDENTIALS["port"], password=CREDENTIALS["password"])
else:
    r = redis.Redis(host='127.0.0.1', port='6379')
app = Flask(__name__)
@app.route('/')
def mainpage():
    """Landing page: navigation links plus the running calorie total."""
    # Redis returns bytes; decode for display.
    total_calories = str(r.get('caloriecount'), 'utf-8')
    template = """
    <HTML><BODY><h2>Welcome to my Food Blog</h2>
    <a href="/newmeal">Add New Meal</a><br>
    <a href="/dumpmeals">Show Meal Blog</a><br><br>
    Calories so far: <b>{}</b>
    </BODY>
    """
    return template.format(total_calories)
@app.route('/newmeal')
def survey():
    """Serve the meal-entry form."""
    return make_response(render_template('newmeal.html'))
@app.route('/mealthankyou.html', methods=['POST'])
def mealthankyou():
    """Persist a submitted meal to Redis and show a confirmation page.

    BUGFIX: this copy of the handler was truncated -- the meal-key
    construction and the head of the r.hmset(...) call were missing,
    leaving a bare ":c, 'description':t})" fragment (a SyntaxError).
    Restored from the intact version of the handler; also dropped the
    duplicated calories debug print.
    """
    mealdate = request.form['mealdate']
    mealtype = request.form['mealtype']
    calories = request.form['calories']
    description = request.form['description']
    print ("Mealtype is " + mealtype)
    print ("Calories is " + calories)
    print ("Description: " + description)
    # Keep the running calorie total and a monotonically increasing meal id.
    r.incrby('caloriecount', int(calories))
    meal_counter = r.incr('counter_meal')
    print ("the meal counter is now: ", meal_counter)
    # Zero-pad the counter so keys sort lexicographically (meal001, ...).
    newmeal = 'meal' + str(meal_counter).zfill(3)
    print (newmeal)
    print ("Storing the meal now")
    # NOTE(review): hmset is deprecated in redis-py >= 3.0 in favour of
    # hset(name, mapping=...); kept as-is to match the client in use.
    r.hmset(newmeal, {'mealdate': mealdate, 'mealtype': mealtype,
                      'calories': calories, 'description': description})
    return """
    <h3> - New entry added to the blog - </h3>
    <a href="/">Back to main menu</a>
    """
@app.route('/dumpmeals')
def dumpmeals():
    """Render every stored meal hash, in sorted key order, as plain HTML."""
    parts = ["<center><h1>Meals to date</h1>",
             "--------------------------<br>"]
    print ("Reading back from Redis")
    for meal_key in sorted(r.keys('meal*')):
        # Each hash field comes back as bytes; decode before display.
        parts.append("Meal Date : " + str(r.hget(meal_key, 'mealdate'), 'utf-8') + "<br>")
        parts.append("Meal Type : " + str(r.hget(meal_key, 'mealtype'), 'utf-8') + "<br>")
        parts.append("Calories : " + str(r.hget(meal_key, 'calories'), 'utf-8') + "<br>")
        parts.append("Description : " + str(r.hget(meal_key, 'description'), 'utf-8') + "<br>")
        parts.append("<hr>")
    return "".join(parts)
# Entry point: listen on all interfaces; Cloud Foundry injects PORT
# (falls back to 5000 for local runs).
if __name__ == "__main__":
    app.run(debug=False, host='0.0.0.0', \
            port=int(os.getenv('PORT', '5000')), threaded=True)
| true | true |
1c3ccde0cfc42164c991ad21a68401fac16479e7 | 103 | py | Python | App/__init__.py | paul-ollis/cleversheep3 | 86e6ca76ea4e8524f16e2348d38484dcfafb07d0 | [
"Apache-2.0"
] | null | null | null | App/__init__.py | paul-ollis/cleversheep3 | 86e6ca76ea4e8524f16e2348d38484dcfafb07d0 | [
"Apache-2.0"
] | null | null | null | App/__init__.py | paul-ollis/cleversheep3 | 86e6ca76ea4e8524f16e2348d38484dcfafb07d0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Modules that help writing applications."""
__docformat__ = "restructuredtext"
| 25.75 | 45 | 0.757282 |
__docformat__ = "restructuredtext"
| true | true |
1c3cce95c6725dbc1bf0208a3ddd7af6a510bbbc | 393 | py | Python | bayespy/tests/__init__.py | dungvtdev/upsbayescpm | f6ee877c689046d3c57a2ac06742cfe4a0b6550e | [
"MIT"
] | 622 | 2015-01-15T19:46:06.000Z | 2022-03-22T13:40:22.000Z | bayespy/tests/__init__.py | dungvtdev/upsbayescpm | f6ee877c689046d3c57a2ac06742cfe4a0b6550e | [
"MIT"
] | 118 | 2015-01-04T06:38:23.000Z | 2021-11-05T17:28:02.000Z | bayespy/tests/__init__.py | dungvtdev/upsbayescpm | f6ee877c689046d3c57a2ac06742cfe4a0b6550e | [
"MIT"
] | 160 | 2015-02-16T15:30:43.000Z | 2022-03-14T00:52:36.000Z | ################################################################################
# Copyright (C) 2015 Hannu Hartikainen
#
# This file is licensed under the MIT License.
################################################################################
import bayespy.plot as bpplt
def setup():
    # Close out any figures left over from previous tests so every test
    # module starts with a clean pyplot state.
    for fignum in bpplt.pyplot.get_fignums():
        bpplt.pyplot.figure(fignum).clear()
| 28.071429 | 80 | 0.394402 | true | true | |
1c3cceea0e62241d61e2790040726e2c235e561b | 5,023 | py | Python | src/pymor/algorithms/randrangefinder.py | TreeerT/pymor | e8b18d2d4c4b5998f0bd84f6728e365e0693b753 | [
"Unlicense"
] | 1 | 2021-08-17T15:55:12.000Z | 2021-08-17T15:55:12.000Z | src/pymor/algorithms/randrangefinder.py | TreeerT/pymor | e8b18d2d4c4b5998f0bd84f6728e365e0693b753 | [
"Unlicense"
] | null | null | null | src/pymor/algorithms/randrangefinder.py | TreeerT/pymor | e8b18d2d4c4b5998f0bd84f6728e365e0693b753 | [
"Unlicense"
] | null | null | null | # This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
from scipy.sparse.linalg import eigsh, LinearOperator
from scipy.special import erfinv
from pymor.algorithms.gram_schmidt import gram_schmidt
from pymor.core.defaults import defaults
from pymor.operators.interface import Operator
@defaults('tol', 'failure_tolerance', 'num_testvecs')
def adaptive_rrf(A, source_product=None, range_product=None, tol=1e-4,
                 failure_tolerance=1e-15, num_testvecs=20, lambda_min=None, iscomplex=False):
    r"""Adaptive randomized range approximation of `A`.

    This is an implementation of Algorithm 1 in :cite:`BS18`.

    Given the |Operator| `A`, the return value of this method is the |VectorArray|
    `B` with the property

    .. math::
        \Vert A - P_{span(B)} A \Vert \leq tol

    with a failure probability smaller than `failure_tolerance`, where the norm denotes the
    operator norm. The inner product of the range of `A` is given by `range_product` and
    the inner product of the source of `A` is given by `source_product`.

    Parameters
    ----------
    A
        The |Operator| A.
    source_product
        Inner product |Operator| of the source of A.
    range_product
        Inner product |Operator| of the range of A.
    tol
        Error tolerance for the algorithm.
    failure_tolerance
        Maximum failure probability.
    num_testvecs
        Number of test vectors.
    lambda_min
        The smallest eigenvalue of source_product.
        If `None`, the smallest eigenvalue is computed using scipy.
    iscomplex
        If `True`, the random vectors are chosen complex.

    Returns
    -------
    B
        |VectorArray| which contains the basis, whose span approximates the range of A.
    """
    assert source_product is None or isinstance(source_product, Operator)
    assert range_product is None or isinstance(range_product, Operator)
    assert isinstance(A, Operator)
    B = A.range.empty()
    # Random test vectors used by the a posteriori error estimator below.
    R = A.source.random(num_testvecs, distribution='normal')
    if iscomplex:
        R += 1j*A.source.random(num_testvecs, distribution='normal')
    # Smallest eigenvalue of the source inner product, needed to relate the
    # Euclidean norm of the Gaussian test vectors to the source_product norm.
    if source_product is None:
        lambda_min = 1
    elif lambda_min is None:
        # Wrap source_product (and its inverse) as scipy LinearOperators and
        # find the smallest eigenvalue via shift-invert mode (sigma=0).
        def mv(v):
            return source_product.apply(source_product.source.from_numpy(v)).to_numpy()
        def mvinv(v):
            return source_product.apply_inverse(source_product.range.from_numpy(v)).to_numpy()
        L = LinearOperator((source_product.source.dim, source_product.range.dim), matvec=mv)
        Linv = LinearOperator((source_product.range.dim, source_product.source.dim), matvec=mvinv)
        lambda_min = eigsh(L, sigma=0, which="LM", return_eigenvectors=False, k=1, OPinv=Linv)[0]
    # Error-estimator threshold from :cite:`BS18`: if the residual norms of
    # all test vectors stay below testlimit, the range approximation meets
    # `tol` with probability at least 1 - failure_tolerance.
    testfail = failure_tolerance / min(A.source.dim, A.range.dim)
    testlimit = np.sqrt(2. * lambda_min) * erfinv(testfail**(1. / num_testvecs)) * tol
    maxnorm = np.inf
    # M holds the test-vector images, progressively deflated by span(B).
    M = A.apply(R)
    while(maxnorm > testlimit):
        basis_length = len(B)
        # Extend the basis by the image of one fresh random vector ...
        v = A.source.random(distribution='normal')
        if iscomplex:
            v += 1j*A.source.random(distribution='normal')
        B.append(A.apply(v))
        # ... orthonormalize only the newly appended part (offset) ...
        gram_schmidt(B, range_product, atol=0, rtol=0, offset=basis_length, copy=False)
        # ... and project the span(B) component out of the test vectors.
        M -= B.lincomb(B.inner(M, range_product).T)
        maxnorm = np.max(M.norm(range_product))
    return B
@defaults('q', 'l')
def rrf(A, source_product=None, range_product=None, q=2, l=8, iscomplex=False):
    """Randomized range approximation of `A`.

    This is an implementation of Algorithm 4.4 in :cite:`HMT11`.

    Given the |Operator| `A`, the return value of this method is the |VectorArray|
    `Q` whose vectors form an approximate orthonomal basis for the range of `A`.

    Parameters
    ----------
    A
        The |Operator| A.
    source_product
        Inner product |Operator| of the source of A.
    range_product
        Inner product |Operator| of the range of A.
    q
        The number of power iterations.
    l
        The block size of the normalized power iterations.
    iscomplex
        If `True`, the random vectors are chosen complex.

    Returns
    -------
    Q
        |VectorArray| which contains the basis, whose span approximates the range of A.
    """
    assert source_product is None or isinstance(source_product, Operator)
    assert range_product is None or isinstance(range_product, Operator)
    assert isinstance(A, Operator)
    # Random sketch: apply A to a block of l Gaussian vectors.
    R = A.source.random(l, distribution='normal')
    if iscomplex:
        R += 1j*A.source.random(l, distribution='normal')
    Q = A.apply(R)
    gram_schmidt(Q, range_product, atol=0, rtol=0, copy=False)
    # q normalized power iterations (A* then A, re-orthonormalizing after
    # each application to avoid loss of accuracy) sharpen the sketch.
    for i in range(q):
        Q = A.apply_adjoint(Q)
        gram_schmidt(Q, source_product, atol=0, rtol=0, copy=False)
        Q = A.apply(Q)
        gram_schmidt(Q, range_product, atol=0, rtol=0, copy=False)
    return Q
| 34.881944 | 98 | 0.673104 |
import numpy as np
from scipy.sparse.linalg import eigsh, LinearOperator
from scipy.special import erfinv
from pymor.algorithms.gram_schmidt import gram_schmidt
from pymor.core.defaults import defaults
from pymor.operators.interface import Operator
@defaults('tol', 'failure_tolerance', 'num_testvecs')
def adaptive_rrf(A, source_product=None, range_product=None, tol=1e-4,
                 failure_tolerance=1e-15, num_testvecs=20, lambda_min=None, iscomplex=False):
    """Adaptive randomized range approximation of `A`.

    Builds a |VectorArray| `B` whose vectors form an approximate orthonormal
    basis for the range of `A`.  One random sample vector at a time is mapped
    through `A` and appended to the basis until a probabilistic a-posteriori
    error estimator, evaluated on `num_testvecs` fixed random test vectors,
    falls below a threshold derived from `tol` and `failure_tolerance`.

    Parameters
    ----------
    A
        The |Operator| A.
    source_product
        Inner product |Operator| of the source of A.
    range_product
        Inner product |Operator| of the range of A.
    tol
        Error tolerance for the range approximation.
    failure_tolerance
        Admissible failure probability of the error estimator.
    num_testvecs
        Number of random test vectors used by the error estimator.
    lambda_min
        Smallest eigenvalue of `source_product`; estimated with `eigsh`
        when `None` and a source product is given.
    iscomplex
        If `True`, the random vectors are chosen complex.

    Returns
    -------
    B
        |VectorArray| which contains the basis, whose span approximates the
        range of A.
    """
    assert source_product is None or isinstance(source_product, Operator)
    assert range_product is None or isinstance(range_product, Operator)
    assert isinstance(A, Operator)
    B = A.range.empty()
    # fixed test vectors for the a-posteriori error estimator
    R = A.source.random(num_testvecs, distribution='normal')
    if iscomplex:
        R += 1j*A.source.random(num_testvecs, distribution='normal')
    if source_product is None:
        lambda_min = 1  # Euclidean inner product
    elif lambda_min is None:
        # estimate the smallest eigenvalue of the source product via
        # shift-invert Lanczos (sigma=0) on SciPy LinearOperator wrappers
        def mv(v):
            return source_product.apply(source_product.source.from_numpy(v)).to_numpy()
        def mvinv(v):
            return source_product.apply_inverse(source_product.range.from_numpy(v)).to_numpy()
        L = LinearOperator((source_product.source.dim, source_product.range.dim), matvec=mv)
        Linv = LinearOperator((source_product.range.dim, source_product.source.dim), matvec=mvinv)
        lambda_min = eigsh(L, sigma=0, which="LM", return_eigenvectors=False, k=1, OPinv=Linv)[0]
    # threshold for the error estimator; maxnorm <= testlimit terminates the loop
    testfail = failure_tolerance / min(A.source.dim, A.range.dim)
    testlimit = np.sqrt(2. * lambda_min) * erfinv(testfail**(1. / num_testvecs)) * tol
    maxnorm = np.inf
    # M holds the residual of the test vectors w.r.t. the current basis
    M = A.apply(R)
    while(maxnorm > testlimit):
        basis_length = len(B)
        # extend the basis by the image of one fresh random vector
        v = A.source.random(distribution='normal')
        if iscomplex:
            v += 1j*A.source.random(distribution='normal')
        B.append(A.apply(v))
        # orthonormalize only the newly appended vector against the rest
        gram_schmidt(B, range_product, atol=0, rtol=0, offset=basis_length, copy=False)
        # project the span of B out of the test vectors, then re-check
        M -= B.lincomb(B.inner(M, range_product).T)
        maxnorm = np.max(M.norm(range_product))
    return B
@defaults('q', 'l')
def rrf(A, source_product=None, range_product=None, q=2, l=8, iscomplex=False):
    """Randomized range approximation of `A` (Algorithm 4.4 in :cite:`HMT11`).

    Returns a |VectorArray| `Q` whose vectors form an approximate
    orthonormal basis for the range of `A`.

    Parameters
    ----------
    A
        The |Operator| A.
    source_product
        Inner product |Operator| of the source of A.
    range_product
        Inner product |Operator| of the range of A.
    q
        The number of power iterations.
    l
        The block size of the normalized power iterations.
    iscomplex
        If `True`, the random vectors are chosen complex.

    Returns
    -------
    Q
        |VectorArray| which contains the basis, whose span approximates the
        range of A.
    """
    assert source_product is None or isinstance(source_product, Operator)
    assert range_product is None or isinstance(range_product, Operator)
    assert isinstance(A, Operator)
    # map a block of l Gaussian samples through A and orthonormalize
    R = A.source.random(l, distribution='normal')
    if iscomplex:
        R += 1j*A.source.random(l, distribution='normal')
    Q = A.apply(R)
    gram_schmidt(Q, range_product, atol=0, rtol=0, copy=False)
    # q normalized power iterations (A* then A, re-orthonormalizing each time)
    for i in range(q):
        Q = A.apply_adjoint(Q)
        gram_schmidt(Q, source_product, atol=0, rtol=0, copy=False)
        Q = A.apply(Q)
        gram_schmidt(Q, range_product, atol=0, rtol=0, copy=False)
    return Q
| true | true |
1c3ccf3e4cb85f0d97103912a0a229d42a88b9b8 | 6,406 | py | Python | tests/test_routing.py | Vsevolod-pl/hivemind | 0300cfd91adeb14d91d9659a98221628f9b775b9 | [
"MIT"
] | 11 | 2021-06-21T19:56:01.000Z | 2021-12-22T09:06:09.000Z | tests/test_routing.py | Vsevolod-pl/hivemind | 0300cfd91adeb14d91d9659a98221628f9b775b9 | [
"MIT"
] | null | null | null | tests/test_routing.py | Vsevolod-pl/hivemind | 0300cfd91adeb14d91d9659a98221628f9b775b9 | [
"MIT"
] | null | null | null | import random
import heapq
import operator
from itertools import chain, zip_longest
from hivemind import LOCALHOST
from hivemind.dht.routing import RoutingTable, DHTID
def test_ids_basic():
    """Sanity-check DHTID generation, the xor metric, and bytes round-trips."""
    for _ in range(100):
        first = DHTID.generate()
        second = DHTID.generate()
        # generated ids stay inside the valid keyspace
        assert DHTID.MIN <= first < DHTID.MAX and DHTID.MIN <= second <= DHTID.MAX
        # xor distance is zero for identical ids and positive otherwise
        assert DHTID.xor_distance(first, first) == DHTID.xor_distance(second, second) == 0
        assert DHTID.xor_distance(first, second) > 0 or (first == second)
        # ids survive serialization to bytes and back
        assert DHTID.from_bytes(bytes(first)) == first and DHTID.from_bytes(second.to_bytes()) == second
def test_ids_depth():
    """DHTID.longest_common_prefix_length must agree with a naive bit-string reference."""
    for i in range(100):
        ids = [random.randint(0, 4096) for i in range(random.randint(1, 256))]
        ours = DHTID.longest_common_prefix_length(*map(DHTID, ids))
        # reference: render each id as a 160-bit big-endian bit string and
        # take the length of the literal shared prefix
        ids_bitstr = [
            "".join(bin(bite)[2:].rjust(8, '0') for bite in uid.to_bytes(20, 'big'))
            for uid in ids
        ]
        reference = len(shared_prefix(*ids_bitstr))
        assert reference == ours, f"ours {ours} != reference {reference}, ids: {ids}"
def test_routing_table_basic():
    """Exercise add/lookup/containment and bucket-index consistency of RoutingTable."""
    node_id = DHTID.generate()
    routing_table = RoutingTable(node_id, bucket_size=20, depth_modulo=5)
    added_nodes = []
    # insert 100 fake neighbors and check bidirectional id <-> endpoint lookup
    for phony_neighbor_port in random.sample(range(10000), 100):
        phony_id = DHTID.generate()
        routing_table.add_or_update_node(phony_id, f'{LOCALHOST}:{phony_neighbor_port}')
        assert phony_id in routing_table
        assert f'{LOCALHOST}:{phony_neighbor_port}' in routing_table
        assert routing_table[phony_id] == f'{LOCALHOST}:{phony_neighbor_port}'
        assert routing_table[f'{LOCALHOST}:{phony_neighbor_port}'] == phony_id
        added_nodes.append(phony_id)
    # buckets must tile the entire keyspace
    assert routing_table.buckets[0].lower == DHTID.MIN and routing_table.buckets[-1].upper == DHTID.MAX
    for bucket in routing_table.buckets:
        assert len(bucket.replacement_nodes) == 0, "There should be no replacement nodes in a table with 100 entries"
    assert 3 <= len(routing_table.buckets) <= 10, len(routing_table.buckets)
    # .get must behave like __getitem__ for present ids and return None otherwise
    random_node = random.choice(added_nodes)
    assert routing_table.get(node_id=random_node) == routing_table[random_node]
    dummy_node = DHTID.generate()
    assert (dummy_node not in routing_table) == (routing_table.get(node_id=dummy_node) is None)
    # get_bucket_index must match a naive linear scan over bucket ranges
    for node in added_nodes:
        found_bucket_index = routing_table.get_bucket_index(node)
        for bucket_index, bucket in enumerate(routing_table.buckets):
            if bucket.lower <= node < bucket.upper:
                break
        else:
            raise ValueError("Naive search could not find bucket. Universe has gone crazy.")
        assert bucket_index == found_bucket_index
def test_routing_table_parameters():
    """Check that bucket_size/depth_modulo control the resulting bucket count."""
    # (bucket_size, depth_modulo, expected min buckets, expected max buckets)
    for (bucket_size, modulo, min_nbuckets, max_nbuckets) in [
        (20, 5, 45, 65),
        (50, 5, 35, 45),
        (20, 10, 650, 800),
        (20, 1, 7, 15),
    ]:
        node_id = DHTID.generate()
        routing_table = RoutingTable(node_id, bucket_size=bucket_size, depth_modulo=modulo)
        for phony_neighbor_port in random.sample(range(1_000_000), 10_000):
            routing_table.add_or_update_node(DHTID.generate(), f'{LOCALHOST}:{phony_neighbor_port}')
        for bucket in routing_table.buckets:
            # a bucket may only accumulate replacements once its main slots are full
            assert len(bucket.replacement_nodes) == 0 or len(bucket.nodes_to_endpoint) <= bucket.size
        assert min_nbuckets <= len(routing_table.buckets) <= max_nbuckets, (
            f"Unexpected number of buckets: {min_nbuckets} <= {len(routing_table.buckets)} <= {max_nbuckets}")
def test_routing_table_search():
    """Validate get_nearest_neighbors against a brute-force heapq reference."""
    for table_size, lower_active, upper_active in [
        (10, 10, 10), (10_000, 800, 1100)
    ]:
        node_id = DHTID.generate()
        routing_table = RoutingTable(node_id, bucket_size=20, depth_modulo=5)
        num_added = 0
        total_nodes = 0
        for phony_neighbor_port in random.sample(range(1_000_000), table_size):
            routing_table.add_or_update_node(DHTID.generate(), f'{LOCALHOST}:{phony_neighbor_port}')
            # count an insertion as "added" only if the active-node total grew
            new_total = sum(len(bucket.nodes_to_endpoint) for bucket in routing_table.buckets)
            num_added += new_total > total_nodes
            total_nodes = new_total
        num_replacements = sum(len(bucket.replacement_nodes) for bucket in routing_table.buckets)
        all_active_neighbors = list(chain(
            *(bucket.nodes_to_endpoint.keys() for bucket in routing_table.buckets)
        ))
        assert lower_active <= len(all_active_neighbors) <= upper_active
        assert len(all_active_neighbors) == num_added
        # every insertion either became an active node or a replacement
        assert num_added + num_replacements == table_size
        # random queries: table's k-NN must match brute-force xor-distance k-NN
        for i in range(1000):
            k = random.randint(1, 100)
            query_id = DHTID.generate()
            exclude = query_id if random.random() < 0.5 else None
            our_knn, our_endpoints = zip(*routing_table.get_nearest_neighbors(query_id, k=k, exclude=exclude))
            reference_knn = heapq.nsmallest(k, all_active_neighbors, key=query_id.xor_distance)
            assert all(our == ref for our, ref in zip_longest(our_knn, reference_knn))
            assert all(our_endpoint == routing_table[our_node]
                       for our_node, our_endpoint in zip(our_knn, our_endpoints))
        # queries for ids already in the table, with the query itself excluded
        for i in range(1000):
            k = random.randint(1, 100)
            query_id = random.choice(all_active_neighbors)
            our_knn, our_endpoints = zip(*routing_table.get_nearest_neighbors(query_id, k=k, exclude=query_id))
            reference_knn = heapq.nsmallest(k + 1, all_active_neighbors, key=query_id.xor_distance)
            if query_id in reference_knn:
                reference_knn.remove(query_id)
            assert len(our_knn) == len(reference_knn)
            assert all(query_id.xor_distance(our) == query_id.xor_distance(ref)
                       for our, ref in zip_longest(our_knn, reference_knn))
            # without exclusion, the query id itself must be the nearest neighbor
            assert routing_table.get_nearest_neighbors(query_id, k=k, exclude=None)[0][0] == query_id
def shared_prefix(*strings: str) -> str:
    """Return the longest common prefix of *strings*.

    With a single argument the argument itself is returned.  Calling with no
    arguments now returns '' instead of raising ``ValueError`` from ``min()``
    on an empty sequence (robustness fix); behavior for one or more
    arguments is unchanged.
    """
    if not strings:
        return ''
    # the prefix can be no longer than the shortest input
    shortest = min(strings, key=len)
    for i, ch in enumerate(shortest):
        # stop at the first position where the inputs disagree
        if any(s[i] != ch for s in strings):
            return shortest[:i]
    return shortest
| 46.42029 | 117 | 0.655948 | import random
import heapq
import operator
from itertools import chain, zip_longest
from hivemind import LOCALHOST
from hivemind.dht.routing import RoutingTable, DHTID
def test_ids_basic():
    """Sanity-check DHTID generation, the xor metric, and bytes round-trips."""
    for i in range(100):
        id1, id2 = DHTID.generate(), DHTID.generate()
        # generated ids stay inside the valid keyspace
        assert DHTID.MIN <= id1 < DHTID.MAX and DHTID.MIN <= id2 <= DHTID.MAX
        # xor distance: zero iff identical, positive otherwise
        assert DHTID.xor_distance(id1, id1) == DHTID.xor_distance(id2, id2) == 0
        assert DHTID.xor_distance(id1, id2) > 0 or (id1 == id2)
        # ids survive serialization to bytes and back
        assert DHTID.from_bytes(bytes(id1)) == id1 and DHTID.from_bytes(id2.to_bytes()) == id2
def test_ids_depth():
    """DHTID.longest_common_prefix_length must agree with a naive bit-string reference."""
    for i in range(100):
        ids = [random.randint(0, 4096) for i in range(random.randint(1, 256))]
        ours = DHTID.longest_common_prefix_length(*map(DHTID, ids))
        # reference: 160-bit big-endian bit strings, literal shared-prefix length
        ids_bitstr = [
            "".join(bin(bite)[2:].rjust(8, '0') for bite in uid.to_bytes(20, 'big'))
            for uid in ids
        ]
        reference = len(shared_prefix(*ids_bitstr))
        assert reference == ours, f"ours {ours} != reference {reference}, ids: {ids}"
def test_routing_table_basic():
    """Exercise add/lookup/containment and bucket-index consistency of RoutingTable."""
    node_id = DHTID.generate()
    routing_table = RoutingTable(node_id, bucket_size=20, depth_modulo=5)
    added_nodes = []
    # insert 100 fake neighbors; lookups must work in both directions
    for phony_neighbor_port in random.sample(range(10000), 100):
        phony_id = DHTID.generate()
        routing_table.add_or_update_node(phony_id, f'{LOCALHOST}:{phony_neighbor_port}')
        assert phony_id in routing_table
        assert f'{LOCALHOST}:{phony_neighbor_port}' in routing_table
        assert routing_table[phony_id] == f'{LOCALHOST}:{phony_neighbor_port}'
        assert routing_table[f'{LOCALHOST}:{phony_neighbor_port}'] == phony_id
        added_nodes.append(phony_id)
    # buckets must tile the entire keyspace
    assert routing_table.buckets[0].lower == DHTID.MIN and routing_table.buckets[-1].upper == DHTID.MAX
    for bucket in routing_table.buckets:
        assert len(bucket.replacement_nodes) == 0, "There should be no replacement nodes in a table with 100 entries"
    assert 3 <= len(routing_table.buckets) <= 10, len(routing_table.buckets)
    # .get must mirror __getitem__ for present ids and return None for absent ones
    random_node = random.choice(added_nodes)
    assert routing_table.get(node_id=random_node) == routing_table[random_node]
    dummy_node = DHTID.generate()
    assert (dummy_node not in routing_table) == (routing_table.get(node_id=dummy_node) is None)
    # get_bucket_index must match a naive linear scan over bucket ranges
    for node in added_nodes:
        found_bucket_index = routing_table.get_bucket_index(node)
        for bucket_index, bucket in enumerate(routing_table.buckets):
            if bucket.lower <= node < bucket.upper:
                break
        else:
            raise ValueError("Naive search could not find bucket. Universe has gone crazy.")
        assert bucket_index == found_bucket_index
def test_routing_table_parameters():
    """Check that bucket_size/depth_modulo control the resulting bucket count."""
    # (bucket_size, depth_modulo, expected min buckets, expected max buckets)
    for (bucket_size, modulo, min_nbuckets, max_nbuckets) in [
        (20, 5, 45, 65),
        (50, 5, 35, 45),
        (20, 10, 650, 800),
        (20, 1, 7, 15),
    ]:
        node_id = DHTID.generate()
        routing_table = RoutingTable(node_id, bucket_size=bucket_size, depth_modulo=modulo)
        for phony_neighbor_port in random.sample(range(1_000_000), 10_000):
            routing_table.add_or_update_node(DHTID.generate(), f'{LOCALHOST}:{phony_neighbor_port}')
        for bucket in routing_table.buckets:
            # replacements may only exist once a bucket's main slots are full
            assert len(bucket.replacement_nodes) == 0 or len(bucket.nodes_to_endpoint) <= bucket.size
        assert min_nbuckets <= len(routing_table.buckets) <= max_nbuckets, (
            f"Unexpected number of buckets: {min_nbuckets} <= {len(routing_table.buckets)} <= {max_nbuckets}")
def test_routing_table_search():
    """Validate get_nearest_neighbors against a brute-force heapq reference."""
    for table_size, lower_active, upper_active in [
        (10, 10, 10), (10_000, 800, 1100)
    ]:
        node_id = DHTID.generate()
        routing_table = RoutingTable(node_id, bucket_size=20, depth_modulo=5)
        num_added = 0
        total_nodes = 0
        for phony_neighbor_port in random.sample(range(1_000_000), table_size):
            routing_table.add_or_update_node(DHTID.generate(), f'{LOCALHOST}:{phony_neighbor_port}')
            # count an insertion as "added" only if the active-node total grew
            new_total = sum(len(bucket.nodes_to_endpoint) for bucket in routing_table.buckets)
            num_added += new_total > total_nodes
            total_nodes = new_total
        num_replacements = sum(len(bucket.replacement_nodes) for bucket in routing_table.buckets)
        all_active_neighbors = list(chain(
            *(bucket.nodes_to_endpoint.keys() for bucket in routing_table.buckets)
        ))
        assert lower_active <= len(all_active_neighbors) <= upper_active
        assert len(all_active_neighbors) == num_added
        # every insertion either became an active node or a replacement
        assert num_added + num_replacements == table_size
        # random queries: table's k-NN must match brute-force xor-distance k-NN
        for i in range(1000):
            k = random.randint(1, 100)
            query_id = DHTID.generate()
            exclude = query_id if random.random() < 0.5 else None
            our_knn, our_endpoints = zip(*routing_table.get_nearest_neighbors(query_id, k=k, exclude=exclude))
            reference_knn = heapq.nsmallest(k, all_active_neighbors, key=query_id.xor_distance)
            assert all(our == ref for our, ref in zip_longest(our_knn, reference_knn))
            assert all(our_endpoint == routing_table[our_node]
                       for our_node, our_endpoint in zip(our_knn, our_endpoints))
        # queries for ids already in the table, with the query itself excluded
        for i in range(1000):
            k = random.randint(1, 100)
            query_id = random.choice(all_active_neighbors)
            our_knn, our_endpoints = zip(*routing_table.get_nearest_neighbors(query_id, k=k, exclude=query_id))
            reference_knn = heapq.nsmallest(k + 1, all_active_neighbors, key=query_id.xor_distance)
            if query_id in reference_knn:
                reference_knn.remove(query_id)
            assert len(our_knn) == len(reference_knn)
            assert all(query_id.xor_distance(our) == query_id.xor_distance(ref)
                       for our, ref in zip_longest(our_knn, reference_knn))
            # without exclusion, the query id itself must be the nearest neighbor
            assert routing_table.get_nearest_neighbors(query_id, k=k, exclude=None)[0][0] == query_id
def shared_prefix(*strings: str):
    """Return the longest prefix common to all given strings."""
    # only positions that exist in every string can be part of the prefix
    limit = min(map(len, strings))
    for pos in range(limit):
        column = {s[pos] for s in strings}
        if len(column) != 1:
            return strings[0][:pos]
    # no mismatch found: the shortest string is itself the common prefix
    return min(strings, key=len)
| true | true |
1c3ccfbb53dbc60e060b28aa5322ce818df6222c | 16,660 | py | Python | segmentation/DDRNet_23_slim_eval_speed.py | ydhongHIT/DDRNet | f2f91b4053831fd54b04e30f60c9f1d4b55cd5b9 | [
"MIT"
] | 225 | 2021-02-24T06:59:40.000Z | 2022-03-30T10:23:47.000Z | segmentation/DDRNet_23_slim_eval_speed.py | scott-mao/DDRNet | f2f91b4053831fd54b04e30f60c9f1d4b55cd5b9 | [
"MIT"
] | 22 | 2021-02-24T07:13:24.000Z | 2022-03-24T10:01:43.000Z | segmentation/DDRNet_23_slim_eval_speed.py | scott-mao/DDRNet | f2f91b4053831fd54b04e30f60c9f1d4b55cd5b9 | [
"MIT"
] | 45 | 2021-02-24T08:58:53.000Z | 2022-03-25T02:10:44.000Z | import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from collections import OrderedDict
BatchNorm2d = nn.BatchNorm2d  # module-level alias so the norm layer can be swapped (e.g. sync-BN) in one place
bn_mom = 0.1  # momentum passed to every BatchNorm2d constructed in this file
def conv3x3(in_planes, out_planes, stride=1):
    """Return a 3x3 convolution with padding 1.

    Bias is enabled; NOTE(review): presumably to replace the affine part of
    the batch-norm layers that this speed-eval variant skips in forward —
    confirm against the training version.
    """
    conv = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=True,
    )
    return conv
class BasicBlock(nn.Module):
    """ResNet basic residual block (two 3x3 convs).

    In this speed-measurement variant the batch-norm layers are constructed
    but bypassed in ``forward`` (the bn calls are commented out); the convs
    carry ``bias=True`` instead.  ``no_relu=True`` suppresses the final
    activation so the caller can fuse the output first (used for the last
    block of a stage).
    """
    # output channels = planes * expansion
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, no_relu=False):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = BatchNorm2d(planes, momentum=bn_mom)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = BatchNorm2d(planes, momentum=bn_mom)
        # optional projection for the residual when shape/stride changes
        self.downsample = downsample
        self.stride = stride
        self.no_relu = no_relu
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        #out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        #out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        # final ReLU is optional so stage outputs can be fused pre-activation
        if self.no_relu:
            return out
        else:
            return self.relu(out)
class Bottleneck(nn.Module):
    """ResNet bottleneck residual block (1x1 -> 3x3 -> 1x1 convs).

    Batch-norm layers are constructed but bypassed in ``forward`` in this
    speed-measurement variant (the bn calls are commented out).  Unlike
    ``BasicBlock``, ``no_relu`` defaults to ``True`` here.
    """
    # output channels = planes * expansion
    expansion = 2
    def __init__(self, inplanes, planes, stride=1, downsample=None, no_relu=True):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)
        self.bn1 = BatchNorm2d(planes, momentum=bn_mom)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=True)
        self.bn2 = BatchNorm2d(planes, momentum=bn_mom)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=True)
        self.bn3 = BatchNorm2d(planes * self.expansion, momentum=bn_mom)
        self.relu = nn.ReLU(inplace=True)
        # optional projection for the residual when shape/stride changes
        self.downsample = downsample
        self.stride = stride
        self.no_relu = no_relu
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        #out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        #out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        #out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        # final ReLU is optional so stage outputs can be fused pre-activation
        if self.no_relu:
            return out
        else:
            return self.relu(out)
class DAPPM(nn.Module):
    """Pyramid pooling module (DAPPM in the DDRNet code base).

    Five parallel branches pool the input at increasing scales (identity,
    5/9/17 average pooling, global pooling), project each to
    ``branch_planes`` channels, and fuse them hierarchically: each branch is
    upsampled to the input resolution, added to the previous branch's
    output, and refined by a 3x3 conv before the next level.  The
    concatenated result is compressed to ``outplanes`` and summed with a
    1x1 shortcut of the input.
    """
    def __init__(self, inplanes, branch_planes, outplanes):
        super(DAPPM, self).__init__()
        # pooled branches: progressively larger receptive fields
        self.scale1 = nn.Sequential(nn.AvgPool2d(kernel_size=5, stride=2, padding=2),
                                    BatchNorm2d(inplanes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),
                                    )
        self.scale2 = nn.Sequential(nn.AvgPool2d(kernel_size=9, stride=4, padding=4),
                                    BatchNorm2d(inplanes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),
                                    )
        self.scale3 = nn.Sequential(nn.AvgPool2d(kernel_size=17, stride=8, padding=8),
                                    BatchNorm2d(inplanes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),
                                    )
        # global-context branch
        self.scale4 = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
                                    BatchNorm2d(inplanes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),
                                    )
        # identity (no pooling) branch
        self.scale0 = nn.Sequential(
                                    BatchNorm2d(inplanes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),
                                    )
        # 3x3 refinements applied after each hierarchical fusion step
        self.process1 = nn.Sequential(
                                    BatchNorm2d(branch_planes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),
                                    )
        self.process2 = nn.Sequential(
                                    BatchNorm2d(branch_planes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),
                                    )
        self.process3 = nn.Sequential(
                                    BatchNorm2d(branch_planes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),
                                    )
        self.process4 = nn.Sequential(
                                    BatchNorm2d(branch_planes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),
                                    )
        # fuse the 5 concatenated branches down to outplanes
        self.compression = nn.Sequential(
                                    BatchNorm2d(branch_planes * 5, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(branch_planes * 5, outplanes, kernel_size=1, bias=False),
                                    )
        # residual shortcut from the raw input
        self.shortcut = nn.Sequential(
                                    BatchNorm2d(inplanes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=False),
                                    )
    def forward(self, x):
        #x = self.downsample(x)
        width = x.shape[-1]
        height = x.shape[-2]
        x_list = []
        # hierarchical fusion: upsample each pooled branch to input size,
        # add the previous level, refine with a 3x3 conv
        x_list.append(self.scale0(x))
        x_list.append(self.process1((F.interpolate(self.scale1(x),
                        size=[height, width],
                        mode='bilinear')+x_list[0])))
        x_list.append((self.process2((F.interpolate(self.scale2(x),
                        size=[height, width],
                        mode='bilinear')+x_list[1]))))
        x_list.append(self.process3((F.interpolate(self.scale3(x),
                        size=[height, width],
                        mode='bilinear')+x_list[2])))
        x_list.append(self.process4((F.interpolate(self.scale4(x),
                        size=[height, width],
                        mode='bilinear')+x_list[3])))
        out = self.compression(torch.cat(x_list, 1)) + self.shortcut(x)
        return out
class segmenthead(nn.Module):
    """Segmentation head: BN-ReLU-3x3 conv, ReLU, 1x1 conv to class logits.

    When ``scale_factor`` is not ``None`` the logits are bilinearly
    upsampled by that factor (the target size is derived from the
    intermediate feature map, which has the same spatial size as the
    output of the 1x1 conv).
    """
    def __init__(self, inplanes, interplanes, outplanes, scale_factor=8):
        super(segmenthead, self).__init__()
        self.bn1 = BatchNorm2d(inplanes, momentum=bn_mom)
        self.conv1 = nn.Conv2d(inplanes, interplanes, kernel_size=3, padding=1, bias=False)
        #self.bn2 = BatchNorm2d(interplanes, momentum=bn_mom)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(interplanes, outplanes, kernel_size=1, padding=0, bias=True)
        self.scale_factor = scale_factor
    def forward(self, x):
        x = self.conv1(self.relu(self.bn1(x)))
        out = self.conv2(self.relu(x))
        if self.scale_factor is not None:
            # upsample logits back toward input resolution
            height = x.shape[-2] * self.scale_factor
            width = x.shape[-1] * self.scale_factor
            out = F.interpolate(out,
                        size=[height, width],
                        mode='bilinear')
        return out
class DualResNet(nn.Module):
    """Dual-resolution network (DDRNet) for semantic segmentation.

    Two branches run in parallel after stage 2: a low-resolution
    ("context") branch that keeps downsampling (layer3/4/5 + DAPPM) and a
    high-resolution ("detail") branch at 1/8 input resolution
    (layer3_/4_/5_).  ``compression3/4`` feed context into the detail
    branch (1x1 conv + upsample); ``down3/4`` feed detail into the context
    branch (strided 3x3 convs).  With ``augment=True`` an auxiliary head on
    the detail branch is returned alongside the main prediction.

    NOTE(review): this file is the speed-eval variant — most batch-norm
    layers in the sub-blocks are bypassed (commented out) and convs carry
    biases.
    """
    def __init__(self, block, layers, num_classes=19, planes=64, spp_planes=128, head_planes=128, augment=False):
        super(DualResNet, self).__init__()
        highres_planes = planes * 2
        self.augment = augment
        # stem: two stride-2 3x3 convs -> 1/4 input resolution
        self.conv1 = nn.Sequential(
            nn.Conv2d(3,planes,kernel_size=3, stride=2, padding=1),
            #BatchNorm2d(planes, momentum=bn_mom),
            nn.ReLU(inplace=True),
            nn.Conv2d(planes,planes,kernel_size=3, stride=2, padding=1),
            #BatchNorm2d(planes, momentum=bn_mom),
            nn.ReLU(inplace=True),
        )
        # non-inplace so residual tensors are not clobbered between branches
        self.relu = nn.ReLU(inplace=False)
        # context branch stages (resolutions 1/4, 1/8, 1/16, 1/32)
        self.layer1 = self._make_layer(block, planes, planes, layers[0])
        self.layer2 = self._make_layer(block, planes, planes * 2, layers[1], stride=2)
        self.layer3 = self._make_layer(block, planes * 2, planes * 4, layers[2], stride=2)
        self.layer4 = self._make_layer(block, planes * 4, planes * 8, layers[3], stride=2)
        # context -> detail fusion (channel compression, upsampled in forward)
        self.compression3 = nn.Sequential(
            nn.Conv2d(planes * 4, highres_planes, kernel_size=1, bias=True),
            #BatchNorm2d(highres_planes, momentum=bn_mom),
        )
        self.compression4 = nn.Sequential(
            nn.Conv2d(planes * 8, highres_planes, kernel_size=1, bias=True),
            #BatchNorm2d(highres_planes, momentum=bn_mom),
        )
        # detail -> context fusion (strided downsampling convs)
        self.down3 = nn.Sequential(
            nn.Conv2d(highres_planes, planes * 4, kernel_size=3, stride=2, padding=1, bias=True),
            #BatchNorm2d(planes * 4, momentum=bn_mom),
        )
        self.down4 = nn.Sequential(
            nn.Conv2d(highres_planes, planes * 4, kernel_size=3, stride=2, padding=1, bias=True),
            #BatchNorm2d(planes * 4, momentum=bn_mom),
            nn.ReLU(inplace=True),
            nn.Conv2d(planes * 4, planes * 8, kernel_size=3, stride=2, padding=1, bias=True),
            #BatchNorm2d(planes * 8, momentum=bn_mom),
        )
        # detail branch stages, all at 1/8 resolution
        self.layer3_ = self._make_layer(block, planes * 2, highres_planes, 2)
        self.layer4_ = self._make_layer(block, highres_planes, highres_planes, 2)
        self.layer5_ = self._make_layer(Bottleneck, highres_planes, highres_planes, 1)
        self.layer5 = self._make_layer(Bottleneck, planes * 8, planes * 8, 1, stride=2)
        self.spp = DAPPM(planes * 16, spp_planes, planes * 4)
        if self.augment:
            # auxiliary supervision head on the detail branch
            self.seghead_extra = segmenthead(highres_planes, head_planes, num_classes)
        self.final_layer = segmenthead(planes * 4, head_planes, num_classes)
        # Kaiming init for convs; BN weights/biases to 1/0 (BNs exist even
        # though most are bypassed in the blocks' forward passes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; the last one is built with no_relu=True."""
        downsample = None
        if stride != 1 or inplanes != planes * block.expansion:
            # projection shortcut when spatial size or channel count changes
            downsample = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=True),
                #nn.BatchNorm2d(planes * block.expansion, momentum=bn_mom),
            )
        layers = []
        layers.append(block(inplanes, planes, stride, downsample))
        inplanes = planes * block.expansion
        for i in range(1, blocks):
            if i == (blocks-1):
                layers.append(block(inplanes, planes, stride=1, no_relu=True))
            else:
                layers.append(block(inplanes, planes, stride=1, no_relu=False))
        return nn.Sequential(*layers)
    def forward(self, x):
        # both branches are fused at 1/8 of the input resolution
        width_output = x.shape[-1] // 8
        height_output = x.shape[-2] // 8
        layers = []
        x = self.conv1(x)
        x = self.layer1(x)
        layers.append(x)
        x = self.layer2(self.relu(x))
        layers.append(x)
        x = self.layer3(self.relu(x))
        layers.append(x)
        # detail branch starts from the stage-2 output
        x_ = self.layer3_(self.relu(layers[1]))
        # bilateral fusion #1: detail -> context and context -> detail
        x = x + self.down3(self.relu(x_))
        x_ = x_ + F.interpolate(
            self.compression3(self.relu(layers[2])),
            size=[height_output, width_output],
            mode='bilinear')
        if self.augment:
            temp = x_  # feature for the auxiliary head
        x = self.layer4(self.relu(x))
        layers.append(x)
        x_ = self.layer4_(self.relu(x_))
        # bilateral fusion #2
        x = x + self.down4(self.relu(x_))
        x_ = x_ + F.interpolate(
            self.compression4(self.relu(layers[3])),
            size=[height_output, width_output],
            mode='bilinear')
        x_ = self.layer5_(self.relu(x_))
        # context branch: final bottleneck + pyramid pooling, upsampled to 1/8
        x = F.interpolate(
            self.spp(self.layer5(self.relu(x))),
            size=[height_output, width_output],
            mode='bilinear')
        x_ = self.final_layer(x + x_)
        if self.augment:
            x_extra = self.seghead_extra(temp)
            return [x_, x_extra]
        else:
            return x_
def DualResNet_imagenet(pretrained=False):
    """Build DDRNet-23-slim (planes=32, 19 classes, auxiliary head enabled).

    Parameters
    ----------
    pretrained
        If True, load ImageNet-pretrained weights from a hard-coded local
        checkpoint path (non-strict, so segmentation-only layers may be
        missing from the checkpoint).

    Returns
    -------
    The constructed :class:`DualResNet` model.
    """
    model = DualResNet(BasicBlock, [2, 2, 2, 2], num_classes=19, planes=32, spp_planes=128, head_planes=64, augment=True)
    if pretrained:
        checkpoint = torch.load('/home/user1/hyd/HRNet/' + "DDRNet23s_imagenet.pth", map_location='cpu')
        # BUG FIX: the key-remapping loop below was trapped inside a
        # triple-quoted string, so `new_state_dict` was undefined and
        # pretrained=True raised NameError.  Checkpoints saved from
        # nn.DataParallel prefix every key with 'module.'; strip it so the
        # keys match this unwrapped model.
        state_dict = checkpoint['state_dict'] if isinstance(checkpoint, dict) and 'state_dict' in checkpoint else checkpoint
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            name = k[7:] if k.startswith('module.') else k
            new_state_dict[name] = v
        model.load_state_dict(new_state_dict, strict=False)
    return model
def get_seg_model(cfg, **kwargs):
    """Factory entry point; `cfg` and `kwargs` are accepted for API compatibility and ignored."""
    return DualResNet_imagenet(pretrained=False)
if __name__ == '__main__':
    # Inference-speed benchmark: measures FPS of DDRNet-23-slim on a single
    # 1024x2048 input on GPU.
    import time
    device = torch.device('cuda')
    #torch.backends.cudnn.enabled = True
    #torch.backends.cudnn.benchmark = True
    model = DualResNet(BasicBlock, [2, 2, 2, 2], num_classes=19, planes=32, spp_planes=128, head_planes=64)
    model.eval()
    model.to(device)
    iterations = None
    input = torch.randn(1, 3, 1024, 2048).cuda()
    with torch.no_grad():
        # warm-up passes (kernel selection / allocator warm-up)
        for _ in range(10):
            model(input)
        if iterations is None:
            # auto-calibrate: double the iteration count until one timed run
            # exceeds 1 s, then size the real run to roughly 6 s of work
            elapsed_time = 0
            iterations = 100
            while elapsed_time < 1:
                torch.cuda.synchronize()
                torch.cuda.synchronize()
                t_start = time.time()
                for _ in range(iterations):
                    model(input)
                torch.cuda.synchronize()
                torch.cuda.synchronize()
                elapsed_time = time.time() - t_start
                iterations *= 2
            FPS = iterations / elapsed_time
            iterations = int(FPS * 6)
        print('=========Speed Testing=========')
        # timed run; synchronize so time.time() brackets all GPU work
        torch.cuda.synchronize()
        torch.cuda.synchronize()
        t_start = time.time()
        for _ in range(iterations):
            model(input)
        torch.cuda.synchronize()
        torch.cuda.synchronize()
        elapsed_time = time.time() - t_start
        latency = elapsed_time / iterations * 1000  # ms per frame
    torch.cuda.empty_cache()
    FPS = 1000 / latency
    print(FPS)
| 40.338983 | 122 | 0.509064 | import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
from collections import OrderedDict
BatchNorm2d = nn.BatchNorm2d  # module-level alias so the norm layer can be swapped (e.g. sync-BN) in one place
bn_mom = 0.1  # momentum passed to every BatchNorm2d constructed in this file
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution, padding 1, configurable stride, bias enabled."""
    conv_kwargs = dict(kernel_size=3, stride=stride, padding=1, bias=True)
    return nn.Conv2d(in_planes, out_planes, **conv_kwargs)
class BasicBlock(nn.Module):
    """ResNet basic residual block (two 3x3 convs).

    The bn1/bn2 layers are constructed but never called in ``forward`` —
    NOTE(review): this matches the speed-eval variant where batch-norm is
    skipped and convs use ``bias=True``.  ``no_relu=True`` suppresses the
    final activation (used for the last block of a stage).
    """
    # output channels = planes * expansion
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None, no_relu=False):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = BatchNorm2d(planes, momentum=bn_mom)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = BatchNorm2d(planes, momentum=bn_mom)
        # optional projection for the residual when shape/stride changes
        self.downsample = downsample
        self.stride = stride
        self.no_relu = no_relu
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.relu(out)
        out = self.conv2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        # final ReLU is optional so stage outputs can be fused pre-activation
        if self.no_relu:
            return out
        else:
            return self.relu(out)
class Bottleneck(nn.Module):
    """ResNet bottleneck residual block (1x1 -> 3x3 -> 1x1 convs).

    bn1/bn2/bn3 are constructed but never called in ``forward`` —
    NOTE(review): batch-norm skipped in this speed-eval variant.  Unlike
    ``BasicBlock``, ``no_relu`` defaults to ``True``.
    """
    # output channels = planes * expansion
    expansion = 2
    def __init__(self, inplanes, planes, stride=1, downsample=None, no_relu=True):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)
        self.bn1 = BatchNorm2d(planes, momentum=bn_mom)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=True)
        self.bn2 = BatchNorm2d(planes, momentum=bn_mom)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
                               bias=True)
        self.bn3 = BatchNorm2d(planes * self.expansion, momentum=bn_mom)
        self.relu = nn.ReLU(inplace=True)
        # optional projection for the residual when shape/stride changes
        self.downsample = downsample
        self.stride = stride
        self.no_relu = no_relu
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.relu(out)
        out = self.conv3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        # final ReLU is optional so stage outputs can be fused pre-activation
        if self.no_relu:
            return out
        else:
            return self.relu(out)
class DAPPM(nn.Module):
    """Pyramid pooling module (DAPPM in the DDRNet code base).

    Five parallel branches pool the input at increasing scales (identity,
    5/9/17 average pooling, global pooling) and project to
    ``branch_planes`` channels; branches are fused hierarchically
    (upsample, add previous level, 3x3 refine), concatenated, compressed
    to ``outplanes``, and summed with a 1x1 shortcut of the input.
    """
    def __init__(self, inplanes, branch_planes, outplanes):
        super(DAPPM, self).__init__()
        # pooled branches: progressively larger receptive fields
        self.scale1 = nn.Sequential(nn.AvgPool2d(kernel_size=5, stride=2, padding=2),
                                    BatchNorm2d(inplanes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),
                                    )
        self.scale2 = nn.Sequential(nn.AvgPool2d(kernel_size=9, stride=4, padding=4),
                                    BatchNorm2d(inplanes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),
                                    )
        self.scale3 = nn.Sequential(nn.AvgPool2d(kernel_size=17, stride=8, padding=8),
                                    BatchNorm2d(inplanes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),
                                    )
        # global-context branch
        self.scale4 = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
                                    BatchNorm2d(inplanes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),
                                    )
        # identity (no pooling) branch
        self.scale0 = nn.Sequential(
                                    BatchNorm2d(inplanes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(inplanes, branch_planes, kernel_size=1, bias=False),
                                    )
        # 3x3 refinements applied after each hierarchical fusion step
        self.process1 = nn.Sequential(
                                    BatchNorm2d(branch_planes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),
                                    )
        self.process2 = nn.Sequential(
                                    BatchNorm2d(branch_planes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),
                                    )
        self.process3 = nn.Sequential(
                                    BatchNorm2d(branch_planes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),
                                    )
        self.process4 = nn.Sequential(
                                    BatchNorm2d(branch_planes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(branch_planes, branch_planes, kernel_size=3, padding=1, bias=False),
                                    )
        # fuse the 5 concatenated branches down to outplanes
        self.compression = nn.Sequential(
                                    BatchNorm2d(branch_planes * 5, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(branch_planes * 5, outplanes, kernel_size=1, bias=False),
                                    )
        # residual shortcut from the raw input
        self.shortcut = nn.Sequential(
                                    BatchNorm2d(inplanes, momentum=bn_mom),
                                    nn.ReLU(inplace=True),
                                    nn.Conv2d(inplanes, outplanes, kernel_size=1, bias=False),
                                    )
    def forward(self, x):
        width = x.shape[-1]
        height = x.shape[-2]
        x_list = []
        # hierarchical fusion: upsample each pooled branch to input size,
        # add the previous level, refine with a 3x3 conv
        x_list.append(self.scale0(x))
        x_list.append(self.process1((F.interpolate(self.scale1(x),
                        size=[height, width],
                        mode='bilinear')+x_list[0])))
        x_list.append((self.process2((F.interpolate(self.scale2(x),
                        size=[height, width],
                        mode='bilinear')+x_list[1]))))
        x_list.append(self.process3((F.interpolate(self.scale3(x),
                        size=[height, width],
                        mode='bilinear')+x_list[2])))
        x_list.append(self.process4((F.interpolate(self.scale4(x),
                        size=[height, width],
                        mode='bilinear')+x_list[3])))
        out = self.compression(torch.cat(x_list, 1)) + self.shortcut(x)
        return out
class segmenthead(nn.Module):
    """Segmentation head: BN-ReLU-3x3 conv, ReLU, 1x1 conv to class logits.

    When ``scale_factor`` is not ``None`` the logits are bilinearly
    upsampled by that factor (target size derived from the intermediate
    feature map, which matches the 1x1-conv output spatially).
    """
    def __init__(self, inplanes, interplanes, outplanes, scale_factor=8):
        super(segmenthead, self).__init__()
        self.bn1 = BatchNorm2d(inplanes, momentum=bn_mom)
        self.conv1 = nn.Conv2d(inplanes, interplanes, kernel_size=3, padding=1, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(interplanes, outplanes, kernel_size=1, padding=0, bias=True)
        self.scale_factor = scale_factor
    def forward(self, x):
        x = self.conv1(self.relu(self.bn1(x)))
        out = self.conv2(self.relu(x))
        if self.scale_factor is not None:
            # upsample logits back toward input resolution
            height = x.shape[-2] * self.scale_factor
            width = x.shape[-1] * self.scale_factor
            out = F.interpolate(out,
                        size=[height, width],
                        mode='bilinear')
        return out
class DualResNet(nn.Module):
def __init__(self, block, layers, num_classes=19, planes=64, spp_planes=128, head_planes=128, augment=False):
super(DualResNet, self).__init__()
highres_planes = planes * 2
self.augment = augment
self.conv1 = nn.Sequential(
nn.Conv2d(3,planes,kernel_size=3, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(planes,planes,kernel_size=3, stride=2, padding=1),
nn.ReLU(inplace=True),
)
self.relu = nn.ReLU(inplace=False)
self.layer1 = self._make_layer(block, planes, planes, layers[0])
self.layer2 = self._make_layer(block, planes, planes * 2, layers[1], stride=2)
self.layer3 = self._make_layer(block, planes * 2, planes * 4, layers[2], stride=2)
self.layer4 = self._make_layer(block, planes * 4, planes * 8, layers[3], stride=2)
self.compression3 = nn.Sequential(
nn.Conv2d(planes * 4, highres_planes, kernel_size=1, bias=True),
)
self.compression4 = nn.Sequential(
nn.Conv2d(planes * 8, highres_planes, kernel_size=1, bias=True),
)
self.down3 = nn.Sequential(
nn.Conv2d(highres_planes, planes * 4, kernel_size=3, stride=2, padding=1, bias=True),
)
self.down4 = nn.Sequential(
nn.Conv2d(highres_planes, planes * 4, kernel_size=3, stride=2, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(planes * 4, planes * 8, kernel_size=3, stride=2, padding=1, bias=True),
)
self.layer3_ = self._make_layer(block, planes * 2, highres_planes, 2)
self.layer4_ = self._make_layer(block, highres_planes, highres_planes, 2)
self.layer5_ = self._make_layer(Bottleneck, highres_planes, highres_planes, 1)
self.layer5 = self._make_layer(Bottleneck, planes * 8, planes * 8, 1, stride=2)
self.spp = DAPPM(planes * 16, spp_planes, planes * 4)
if self.augment:
self.seghead_extra = segmenthead(highres_planes, head_planes, num_classes)
self.final_layer = segmenthead(planes * 4, head_planes, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, inplanes, planes, blocks, stride=1):
        """Stack ``blocks`` residual units into an ``nn.Sequential``.

        The first unit receives ``stride`` and, when the shape changes, a 1x1
        convolution shortcut projection; the final unit is built with
        ``no_relu=True`` so its raw (pre-activation) output can be fused later.
        """
        shortcut = None
        if stride != 1 or inplanes != planes * block.expansion:
            # Projection shortcut so the residual addition shapes match
            shortcut = nn.Sequential(
                nn.Conv2d(inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=True),
            )
        units = [block(inplanes, planes, stride, shortcut)]
        inplanes = planes * block.expansion
        for idx in range(1, blocks):
            is_last = idx == blocks - 1
            units.append(block(inplanes, planes, stride=1, no_relu=is_last))
        return nn.Sequential(*units)
def forward(self, x):
        """Dual-branch forward pass.

        ``x`` flows through the low-resolution context branch (layer1..layer5 +
        spp) while ``x_`` carries the high-resolution branch; the branches
        exchange information at two fusion points before the final head.
        Returns ``[main_logits, aux_logits]`` when ``self.augment`` is True,
        otherwise only the main logits (at 1/8 of the input resolution).
        """
        # Both branches are fused / predicted at 1/8 of the input spatial size
        width_output = x.shape[-1] // 8
        height_output = x.shape[-2] // 8
        layers = []  # stashes the backbone stage outputs used at the fusion points
        x = self.conv1(x)
        x = self.layer1(x)
        layers.append(x)            # layers[0]: stage-1 features
        x = self.layer2(self.relu(x))
        layers.append(x)            # layers[1]: stage-2 features (1/8 resolution)
        x = self.layer3(self.relu(x))
        layers.append(x)            # layers[2]: stage-3 features
        # High-resolution branch starts from the stage-2 features
        x_ = self.layer3_(self.relu(layers[1]))
        # First bilateral fusion: high-res -> context (strided conv) ...
        x = x + self.down3(self.relu(x_))
        # ... and context -> high-res (1x1 compression + bilinear upsampling)
        x_ = x_ + F.interpolate(
                        self.compression3(self.relu(layers[2])),
                        size=[height_output, width_output],
                        mode='bilinear')
        if self.augment:
            # Keep the pre-layer4 high-res features for the auxiliary head
            temp = x_
        x = self.layer4(self.relu(x))
        layers.append(x)            # layers[3]: stage-4 features
        x_ = self.layer4_(self.relu(x_))
        # Second bilateral fusion (same pattern as above, one stage deeper)
        x = x + self.down4(self.relu(x_))
        x_ = x_ + F.interpolate(
                        self.compression4(self.relu(layers[3])),
                        size=[height_output, width_output],
                        mode='bilinear')
        x_ = self.layer5_(self.relu(x_))
        # Context branch tail: final stage + pyramid pooling, upsampled back to 1/8
        # NOTE(review): F.interpolate is called with default align_corners — confirm intended.
        x = F.interpolate(
                        self.spp(self.layer5(self.relu(x))),
                        size=[height_output, width_output],
                        mode='bilinear')
        # Final head on the sum of both branches
        x_ = self.final_layer(x + x_)
        if self.augment:
            x_extra = self.seghead_extra(temp)
            return [x_, x_extra]
        else:
            return x_
def DualResNet_imagenet(pretrained=False):
    """Build the slim DualResNet configuration, optionally loading local weights.

    :param pretrained: when True, load weights from the hard-coded checkpoint
                       path (non-strict, so head/key mismatches are tolerated)
    :return: a DualResNet built with augment=True (forward returns [main, aux])
    """
    model = DualResNet(BasicBlock, [2, 2, 2, 2], num_classes=19, planes=32, spp_planes=128, head_planes=64, augment=True)
    if pretrained:
        checkpoint = torch.load('/home/user1/hyd/HRNet/' + "DDRNet23s_imagenet.pth", map_location='cpu')
        # Bug fix: the original referenced an undefined name `new_state_dict`,
        # raising NameError whenever pretrained=True. Unwrap a possible
        # {'state_dict': ...} container and strip a DataParallel 'module.'
        # prefix before loading.
        if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
            checkpoint = checkpoint['state_dict']
        new_state_dict = {k[len('module.'):] if k.startswith('module.') else k: v
                          for k, v in checkpoint.items()}
        model.load_state_dict(new_state_dict, strict = False)
    return model
def get_seg_model(cfg, **kwargs):
    """Segmentation-model factory hook.

    ``cfg`` and ``kwargs`` are accepted for interface compatibility with the
    surrounding framework but are not used by this implementation.
    """
    return DualResNet_imagenet(pretrained=False)
if __name__ == '__main__':
    # Standalone throughput/latency benchmark of the network on a
    # full-resolution (1024x2048) input. Requires a CUDA device.
    import time
    device = torch.device('cuda')
    model = DualResNet(BasicBlock, [2, 2, 2, 2], num_classes=19, planes=32, spp_planes=128, head_planes=64)
    model.eval()
    model.to(device)
    iterations = None
    # NOTE(review): 'input' shadows the builtin of the same name.
    input = torch.randn(1, 3, 1024, 2048).cuda()
    with torch.no_grad():
        # Warm-up runs so one-time CUDA setup cost is excluded from timing
        for _ in range(10):
            model(input)
        if iterations is None:
            # Calibration: keep doubling the iteration count until a timed run
            # takes at least 1 s, then size the measured run from the observed
            # throughput (roughly a 6-second budget).
            elapsed_time = 0
            iterations = 100
            while elapsed_time < 1:
                # synchronize so time.time() brackets only completed GPU work
                # (called twice in the original; redundant but harmless)
                torch.cuda.synchronize()
                torch.cuda.synchronize()
                t_start = time.time()
                for _ in range(iterations):
                    model(input)
                torch.cuda.synchronize()
                torch.cuda.synchronize()
                elapsed_time = time.time() - t_start
                iterations *= 2
            FPS = iterations / elapsed_time
            iterations = int(FPS * 6)
        print('=========Speed Testing=========')
        torch.cuda.synchronize()
        torch.cuda.synchronize()
        t_start = time.time()
        for _ in range(iterations):
            model(input)
        torch.cuda.synchronize()
        torch.cuda.synchronize()
        elapsed_time = time.time() - t_start
        # Average per-forward latency in milliseconds
        latency = elapsed_time / iterations * 1000
        torch.cuda.empty_cache()
        FPS = 1000 / latency
        print(FPS)
| true | true |
1c3ccffac17b54abc647f6fe26d87d0a19ca2a4d | 4,424 | py | Python | day14/script.py | kohakuma4m/AdventOfCode_2020 | 75d4908f7b6b89a9a2a2af0097fe8fb450b260a3 | [
"MIT"
] | null | null | null | day14/script.py | kohakuma4m/AdventOfCode_2020 | 75d4908f7b6b89a9a2a2af0097fe8fb450b260a3 | [
"MIT"
] | null | null | null | day14/script.py | kohakuma4m/AdventOfCode_2020 | 75d4908f7b6b89a9a2a2af0097fe8fb450b260a3 | [
"MIT"
] | null | null | null | import sys; sys.path.append('../common')
import mylib as utils # pylint: disable=import-error
import re
from itertools import product
# Read args
# Input file defaults to input.txt; first CLI argument overrides it
filename = 'input.txt' if len(sys.argv) == 1 else sys.argv[1]
print(filename, '\n')
###########################
# region COMMON
# Matches 'mask = <36-char mask>' lines and captures the mask string
MASK_REGEX = re.compile(r'^mask = (.+)$')
# Matches 'mem[<address>] = <value>' lines and captures both integers
INSTRUCTION_REGEX = re.compile(r'^mem\[(\d+)\] = (\d+)$')
def applyMask(value: int, mask: str) -> int:
    """Decoder-chip v1: force value bits wherever the mask holds '0' or '1'.

    'X' positions keep the corresponding bit of ``value``. The result is
    always exactly ``len(mask)`` bits wide (higher bits are dropped).
    """
    width = len(mask)
    # Right-align the value's bits to the mask: pad with zeros, truncate overflow
    bits = f'{value:b}'.zfill(width)[-width:]
    merged = ''.join(bit if m == 'X' else m for m, bit in zip(mask, bits))
    return int(merged, 2)
def applyMask2(value: int, mask: str) -> str:
    """Decoder-chip v2: mask an address, keeping '1' and floating 'X' bits.

    '0' positions pass the corresponding bit of ``value`` through unchanged;
    every other mask character ('1' or 'X') overwrites it. Returns the masked
    address as a ``len(mask)``-character binary string (may contain 'X').
    """
    width = len(mask)
    # Right-align the value's bits to the mask: pad with zeros, truncate overflow
    bits = f'{value:b}'.zfill(width)[-width:]
    return ''.join(bit if m == '0' else m for m, bit in zip(mask, bits))
def getAllFloatingAddresses(maskedAddress: str) -> list:
    """Expand every floating 'X' bit into both 0 and 1.

    Returns the 2^n concrete addresses (n = number of 'X' characters) as
    integers, in the order produced by itertools.product.
    """
    x_positions = [pos for pos, ch in enumerate(maskedAddress) if ch == 'X']
    addresses = []
    for combo in product('01', repeat=len(x_positions)):
        chars = list(maskedAddress)
        # Substitute this combination of bits into the floating slots
        for pos, bit in zip(x_positions, combo):
            chars[pos] = bit
        addresses.append(int(''.join(chars), 2))
    return addresses
# Much faster version (constructing resulted address with product directly)
def getAllFloatingAddresses2(maskedAddress: str) -> list:
    """Fast expansion of floating 'X' bits via a single cartesian product."""
    # Each fixed character contributes one option; each 'X' contributes two
    choices = [('0', '1') if ch == 'X' else (ch,) for ch in maskedAddress]
    return [int(''.join(bits), 2) for bits in product(*choices)]
class Program:
    """Interpreter for the port-computer initialization program.

    Executes 'mask = ...' and 'mem[i] = v' instructions against a sparse
    memory dict, with either decoder-chip version 1 (mask applied to values,
    ``init``) or version 2 (mask applied to addresses, ``init2``).
    """
    def __init__(self, instructions: list, memory: dict = None):
        """
        :param instructions: program lines ('mask = ...' / 'mem[i] = v')
        :param memory: optional pre-populated memory dict (address -> value)
        """
        self.mask = None
        self.instructions = instructions
        self.nbInstructions = len(instructions)
        # Bug fix: the original used a mutable default ({}), which is shared
        # between every Program instance; create a fresh dict per instance.
        self.memory = {} if memory is None else memory
    def init(self):
        """Run the program with version-1 decoding (mask applied to values)."""
        for line in self.instructions:
            if line.startswith('mask'):
                self.mask = MASK_REGEX.findall(line)[0]
            else:
                (idx, val) = INSTRUCTION_REGEX.findall(line)[0]
                self.memory[int(idx)] = applyMask(int(val), self.mask) # Apply mask to value
    def init2(self):
        """Run the program with version-2 decoding (mask applied to addresses)."""
        for line in self.instructions:
            if line.startswith('mask'):
                self.mask = MASK_REGEX.findall(line)[0]
            else:
                (idx, val) = INSTRUCTION_REGEX.findall(line)[0]
                # Apply mask to the address, then write the value to every
                # concrete address produced by the floating bits
                address = applyMask2(int(idx), self.mask)
                for floatingAddress in getAllFloatingAddresses2(address):
                    self.memory[floatingAddress] = int(val)
    def resetMemory(self):
        """Clear the mask and memory so the program can be re-run."""
        self.mask = None
        self.memory = {}
    def getState(self):
        """Return the sum of every value currently held in memory."""
        return sum(self.memory.values())
    def __str__(self):
        separator = '#################'
        memory = '\n'.join(['%2d --> val: %d' % (pos, val) for pos, val in sorted(self.memory.items())])
        string = f'Number of instructions: {self.nbInstructions}\n\nCurrent memory:\n{memory}'
        return f'{separator}\n{string}\n{separator}\n'
# endregion COMMON
###########################
###########################
# FETCH DATA
###########################
lines = utils.readFileLines(filename)
########
# PART 1
########
# Version-1 decoder: mask applied to values
program = Program(lines)
program.init()
print(f'1) Sum of all memory values after initialization: {program.getState()}')
########
# PART 2
########
# Version-2 decoder: same instructions, mask applied to addresses
program.resetMemory()
program.init2()
print(f'2) Sum of all memory values after initialization (version 2): {program.getState()}') | 33.515152 | 110 | 0.59991 | import sys; sys.path.append('../common')
import mylib as utils
import re
from itertools import product
filename = 'input.txt' if len(sys.argv) == 1 else sys.argv[1]
print(filename, '\n')
skedValue
elif mask[-1-i] != 'X':
maskedValue = mask[-1-i] + maskedValue
else:
maskedValue = binaryValue[-1-i] + maskedValue
return int(maskedValue, 2)
def applyMask2(value: int, mask: str) -> str:
binaryValue = '%s' % f'{value:b}'
maskedValue = ''
for i in range(0, len(mask)):
if i >= len(binaryValue):
maskedValue = mask[-1-i] + maskedValue
elif mask[-1-i] != '0':
maskedValue = mask[-1-i] + maskedValue
else:
maskedValue = binaryValue[-1-i] + maskedValue
return maskedValue
def getAllFloatingAddresses(maskedAddress: str) -> list:
floatingBitPositions = [idx for idx, c in enumerate(maskedAddress) if c == 'X']
nbFloatingBits = len(floatingBitPositions)
floatingBitValues = product([0, 1], repeat=nbFloatingBits)
addresses = []
for values in floatingBitValues:
floatingAddress = ''
positionsValueIndex = { floatingBitPositions[i]: str(values[i]) for i in range(0, nbFloatingBits) }
for idx in range(0, len(maskedAddress)):
floatingAddress += positionsValueIndex[idx] if idx in floatingBitPositions else maskedAddress[idx]
addresses.append(int(floatingAddress, 2))
return addresses
def getAllFloatingAddresses2(maskedAddress: str) -> list:
options = [c if c != 'X' else ('0', '1') for c in maskedAddress]
return [int(''.join(o),2) for o in product(*options)]
class Program:
def __init__(self, instructions: list, memory: dict = {}):
self.mask = None
self.instructions = instructions
self.nbInstructions = len(instructions)
self.memory = memory
def init(self):
for line in self.instructions:
if line.startswith('mask'):
self.mask = MASK_REGEX.findall(line)[0]
else:
(idx, val) = INSTRUCTION_REGEX.findall(line)[0]
self.memory[int(idx)] = applyMask(int(val), self.mask)
def init2(self):
for line in self.instructions:
if line.startswith('mask'):
self.mask = MASK_REGEX.findall(line)[0]
else:
(idx, val) = INSTRUCTION_REGEX.findall(line)[0]
address = applyMask2(int(idx), self.mask)
for floatingAddress in getAllFloatingAddresses2(address):
self.memory[floatingAddress] = int(val)
def resetMemory(self):
self.mask = None
self.memory = {}
def getState(self):
return sum(self.memory.values())
def __str__(self):
separator = '#################'
memory = '\n'.join(['%2d --> val: %d' % (pos, val) for pos, val in sorted(self.memory.items())])
string = f'Number of instructions: {self.nbInstructions}\n\nCurrent memory:\n{memory}'
return f'{separator}\n{string}\n{separator}\n'
| true | true |
1c3cd01086fc2b27eca50ee11e7c335c95b6ca7d | 3,535 | py | Python | main.py | colspan/wikipedia-ja-word2vec | c39429cd23d4c39d48b6fee85b65a15d2d3fef58 | [
"MIT"
] | null | null | null | main.py | colspan/wikipedia-ja-word2vec | c39429cd23d4c39d48b6fee85b65a15d2d3fef58 | [
"MIT"
] | null | null | null | main.py | colspan/wikipedia-ja-word2vec | c39429cd23d4c39d48b6fee85b65a15d2d3fef58 | [
"MIT"
] | 1 | 2017-03-12T16:35:20.000Z | 2017-03-12T16:35:20.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import glob
import re
import logging
import argparse
from gensim.models import word2vec
from luigi.format import Nop
import requests
import luigi
from utils import MecabSplitter, NoWakatiSplitter # JumanPPSplitter,
class DownloadWikipediaDump(luigi.Task):
    """
    Download the latest Japanese Wikipedia pages-articles dump.
    """
    url = (
        "https://dumps.wikimedia.org/jawiki/latest/jawiki-latest-pages-articles.xml.bz2"
    )
    def output(self):
        # Binary (Nop) format: the bz2 payload must not be text-processed
        return luigi.LocalTarget(
            "downloads/jawiki-latest-pages-articles.xml.bz2", format=Nop
        )
    def run(self):
        r = requests.get(self.url, stream=True)
        # Fix: fail fast on HTTP errors instead of silently saving an error
        # page as the dump file.
        r.raise_for_status()
        with self.output().open("wb") as f_out:
            for chunk in r.iter_content(chunk_size=1024):
                f_out.write(chunk)
class DecompressWikipediaDump(luigi.Task):
    """
    Decompress the downloaded bz2 dump into a plain XML file.
    """
    def requires(self):
        return DownloadWikipediaDump()
    def output(self):
        return luigi.LocalTarget("var/jawiki-latest-pages-articles.xml")
    def run(self):
        import subprocess
        with self.output().temporary_path() as temp_output_path:
            # Fix: the original used os.system with a shell redirect and
            # ignored bunzip2's exit status, which could leave a truncated
            # file that luigi would then treat as complete. subprocess with
            # check=True raises on failure and avoids the shell entirely.
            with open(temp_output_path, "wb") as f_out:
                subprocess.run(["bunzip2", "-c", self.input().path],
                               stdout=f_out, check=True)
class ParseWikipediaDump(luigi.Task):
    """
    Extract plain-text articles from the Wikipedia XML dump with wikiextractor.
    Reference: http://taka-say.hateblo.jp/entry/2016/05/20/221817
    """
    def requires(self):
        return DecompressWikipediaDump()
    def output(self):
        return luigi.LocalTarget("var/wikipedia_extracted")
    def run(self):
        import subprocess
        args = [
            "wikiextractor",
            "-b",
            "20M",
            "-o",
            self.output().path,
            self.input().path,
        ]
        print(" ".join(args))
        # Fix: the original ignored os.system's return code, silently
        # continuing with missing/partial output; check=True raises instead.
        subprocess.run(args, check=True)
class SplitWords(luigi.Task):
    """
    Tokenize the extracted Wikipedia text into whitespace-separated words.
    """
    splitter = luigi.Parameter(default="mecab")
    def requires(self):
        return ParseWikipediaDump()
    def output(self):
        return luigi.LocalTarget("var/split_{}_wikipedia.txt".format(self.splitter))
    def run(self):
        # Lines to skip: wikiextractor <doc ...> / </doc> markers
        doc_tag = re.compile("<doc.*>|<\\/doc>")
        word_splitter = MecabSplitter() if self.splitter == "mecab" else NoWakatiSplitter()
        with self.output().open("w") as sink:
            for source_path in glob.iglob(self.input().path + "/*/wiki*"):
                with open(source_path, "r") as source_file:
                    for raw_line in source_file:
                        # Skip document markers and empty (newline-only) lines
                        if doc_tag.match(raw_line) or len(raw_line) == 1:
                            continue
                        sink.write(" ".join(word_splitter.split(raw_line)) + "\n")
class TrainWord2VecModel(luigi.Task):
    """
    Train a Word2Vec model on the tokenized Wikipedia corpus.
    """
    # Which tokenizer produced the corpus; determines input/output file names
    splitter = luigi.Parameter(default="mecab")
    def requires(self):
        return SplitWords(splitter=self.splitter)
    def output(self):
        return luigi.LocalTarget("var/wikipedia_{}.model".format(self.splitter))
    def run(self):
        # gensim reports training progress through the logging module
        logging.basicConfig(
            format="%(asctime)s : %(levelname)s : %(message)s", level=logging.INFO
        )
        sentences = word2vec.Text8Corpus(self.input().path)
        # 200-dim vectors; ignore words seen < 20 times; context window of 15
        model = word2vec.Word2Vec(sentences, vector_size=200, min_count=20, window=15)
        model.save(self.output().path)
if __name__ == "__main__":
    # Delegate CLI parsing and task scheduling to luigi
    # (e.g. `python main.py TrainWord2VecModel --local-scheduler`)
    luigi.run()
| 25.25 | 88 | 0.587836 |
import glob
import re
import logging
import argparse
from gensim.models import word2vec
from luigi.format import Nop
import requests
import luigi
from utils import MecabSplitter, NoWakatiSplitter
class DownloadWikipediaDump(luigi.Task):
url = (
"https://dumps.wikimedia.org/jawiki/latest/jawiki-latest-pages-articles.xml.bz2"
)
def output(self):
return luigi.LocalTarget(
"downloads/jawiki-latest-pages-articles.xml.bz2", format=Nop
)
def run(self):
r = requests.get(self.url, stream=True)
with self.output().open("wb") as f_out:
for chunk in r.iter_content(chunk_size=1024):
f_out.write(chunk)
class DecompressWikipediaDump(luigi.Task):
def requires(self):
return DownloadWikipediaDump()
def output(self):
return luigi.LocalTarget("var/jawiki-latest-pages-articles.xml")
def run(self):
import os
with self.output().temporary_path() as temp_output_path:
args = ["bunzip2", "-c", self.input().path, ">", temp_output_path]
os.system(" ".join(args))
class ParseWikipediaDump(luigi.Task):
def requires(self):
return DecompressWikipediaDump()
def output(self):
return luigi.LocalTarget("var/wikipedia_extracted")
def run(self):
import os
args = [
"wikiextractor",
"-b",
"20M",
"-o",
self.output().path,
self.input().path,
]
print(" ".join(args))
os.system(" ".join(args))
class SplitWords(luigi.Task):
splitter = luigi.Parameter(default="mecab")
def requires(self):
return ParseWikipediaDump()
def output(self):
return luigi.LocalTarget("var/split_{}_wikipedia.txt".format(self.splitter))
def run(self):
pattern = re.compile("<doc.*>|<\\/doc>")
if self.splitter == "mecab":
splitter = MecabSplitter()
else:
splitter = NoWakatiSplitter()
with self.output().open("w") as f_output:
for source in glob.iglob(self.input().path + "/*/wiki*"):
with open(source, "r") as f_input:
for line in f_input:
if pattern.match(line) or len(line) == 1:
continue
words = splitter.split(line)
f_output.write(" ".join(words) + "\n")
class TrainWord2VecModel(luigi.Task):
splitter = luigi.Parameter(default="mecab")
def requires(self):
return SplitWords(splitter=self.splitter)
def output(self):
return luigi.LocalTarget("var/wikipedia_{}.model".format(self.splitter))
def run(self):
logging.basicConfig(
format="%(asctime)s : %(levelname)s : %(message)s", level=logging.INFO
)
sentences = word2vec.Text8Corpus(self.input().path)
model = word2vec.Word2Vec(sentences, vector_size=200, min_count=20, window=15)
model.save(self.output().path)
if __name__ == "__main__":
luigi.run()
| true | true |
1c3cd0aae63d0ff3904d68a0016ea5b819637691 | 11,284 | py | Python | led/dump/led-demo-raspberry/cohorte/dist/cohorte-1.0.0-20141209.234423-41-python-distribution/repo/herald/transports/xmpp/transport.py | isandlaTech/cohorte-demos | 1d958b2bee33f79a0f1518b3832ef8a52b9a4bc0 | [
"Apache-2.0"
] | 1 | 2017-03-05T18:42:02.000Z | 2017-03-05T18:42:02.000Z | led/dump/led-demo-raspberry/cohorte/dist/cohorte-1.0.0-20141209.234423-41-python-distribution/repo/herald/transports/xmpp/transport.py | isandlaTech/cohorte-demos | 1d958b2bee33f79a0f1518b3832ef8a52b9a4bc0 | [
"Apache-2.0"
] | 2 | 2015-05-26T09:08:47.000Z | 2015-08-11T15:08:01.000Z | led/dump/led-demo-yun/cohorte/dist/cohorte-1.0.0-20141216.234517-57-python-distribution/repo/herald/transports/xmpp/transport.py | isandlaTech/cohorte-demos | 1d958b2bee33f79a0f1518b3832ef8a52b9a4bc0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Herald XMPP transport implementation
:author: Thomas Calmant
:copyright: Copyright 2014, isandlaTech
:license: Apache License 2.0
:version: 0.0.2
:status: Alpha
..
Copyright 2014 isandlaTech
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module version
__version_info__ = (0, 0, 2)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
# Herald XMPP
from . import FACTORY_TRANSPORT, SERVICE_XMPP_DIRECTORY, ACCESS_ID, \
PROP_XMPP_SERVER, PROP_XMPP_PORT, PROP_MONITOR_JID, PROP_MONITOR_KEY, \
PROP_XMPP_ROOM_JID
from .beans import XMPPAccess
from .bot import HeraldBot
# Herald Core
from herald.exceptions import InvalidPeerAccess
import herald
import herald.beans as beans
import herald.utils as utils
# XMPP
import sleekxmpp
# Pelix
from pelix.ipopo.decorators import ComponentFactory, Requires, Provides, \
Property, Validate, Invalidate
import pelix.misc.jabsorb as jabsorb
# Standard library
import json
import logging
# ------------------------------------------------------------------------------
_logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
@ComponentFactory(FACTORY_TRANSPORT)
@Requires('_core', herald.SERVICE_HERALD_INTERNAL)
@Requires('_directory', herald.SERVICE_DIRECTORY)
@Requires('_xmpp_directory', SERVICE_XMPP_DIRECTORY)
@Provides(herald.SERVICE_TRANSPORT, '_controller')
@Property('_access_id', herald.PROP_ACCESS_ID, ACCESS_ID)
@Property('_host', PROP_XMPP_SERVER, 'localhost')
@Property('_port', PROP_XMPP_PORT, 5222)
@Property('_monitor_jid', PROP_MONITOR_JID)
@Property('_key', PROP_MONITOR_KEY)
@Property('_room', PROP_XMPP_ROOM_JID)
class XmppTransport(object):
    """
    XMPP Messenger for Herald.
    """
    def __init__(self):
        """
        Sets up the transport
        """
        # Herald core service
        self._core = None
        # Herald Core directory
        self._directory = None
        # Herald XMPP directory
        self._xmpp_directory = None
        # Service controller (True only once we joined the main room)
        self._controller = False
        # Properties
        self._access_id = ACCESS_ID
        self._host = "localhost"
        self._port = 5222
        self._monitor_jid = None
        self._key = None
        self._room = None
        # MUC service domain (computed from the room JID on validation)
        self._muc_domain = None
        # XMPP bot
        self._bot = HeraldBot()
    @Validate
    def _validate(self, _):
        """
        Component validated
        """
        # Ensure we do not provide the service at first
        self._controller = False
        # Compute the MUC domain
        self._muc_domain = sleekxmpp.JID(self._room).domain
        # Register to session events
        self._bot.add_event_handler("session_start", self.__on_start)
        self._bot.add_event_handler("session_end", self.__on_end)
        self._bot.add_event_handler("muc::{0}::got_online".format(self._room),
                                    self.__room_in)
        self._bot.add_event_handler("muc::{0}::got_offline".format(self._room),
                                    self.__room_out)
        # Register "XEP-0203: Delayed Delivery" plug-in
        self._bot.register_plugin("xep_0203")
        # Register to messages (loop back filtered by the bot)
        self._bot.set_message_callback(self.__on_message)
        # Connect to the server
        self._bot.connect(self._host, self._port)
    @Invalidate
    def _invalidate(self, _):
        """
        Component invalidated
        """
        # Disconnect the bot and clear callbacks
        self._bot.disconnect()
        self._bot.set_message_callback(None)
        self._bot.del_event_handler("session_start", self.__on_start)
        self._bot.del_event_handler("session_end", self.__on_end)
        # Fix: also remove the MUC handlers registered in _validate, so a
        # re-validated component does not accumulate duplicate handlers
        self._bot.del_event_handler("muc::{0}::got_online".format(self._room),
                                    self.__room_in)
        self._bot.del_event_handler("muc::{0}::got_offline".format(self._room),
                                    self.__room_out)
    def __on_start(self, _):
        """
        XMPP session started
        """
        # Log our JID
        _logger.info("Bot connected with JID: %s", self._bot.boundjid.bare)
        # Get our local peer description
        peer = self._directory.get_local_peer()
        # Ask the monitor to invite us, using our UID as nickname
        _logger.info("Requesting to join %s", self._monitor_jid)
        self._bot.herald_join(peer.uid, self._monitor_jid, self._key,
                              peer.groups)
    def __on_message(self, msg):
        """
        Received an XMPP message

        :param msg: A message stanza
        """
        subject = msg['subject']
        if not subject:
            # No subject: not an Herald message. Abandon.
            return
        if msg['delay']['stamp'] is not None:
            # Delayed message: ignore
            return
        # Check if the message is from Multi-User Chat or direct
        muc_message = (msg['type'] == 'groupchat') \
            or (msg['from'].domain == self._muc_domain)
        sender_jid = msg['from'].full
        try:
            if muc_message:
                # Group message: resource is the isolate UID
                sender_uid = msg['from'].resource
            else:
                sender_uid = self._xmpp_directory.from_jid(sender_jid)
        except KeyError:
            sender_uid = "<unknown>"
        try:
            content = jabsorb.from_jabsorb(json.loads(msg['body']))
        except ValueError:
            # Content can't be decoded, use its string representation as is
            content = msg['body']
        uid = msg['thread']
        reply_to = msg['parent_thread']
        # Extra parameters, for a reply
        extra = {"parent_uid": uid,
                 "sender_jid": sender_jid}
        # Call back the core service
        message = beans.MessageReceived(uid, subject, content, sender_uid,
                                        reply_to, self._access_id, extra=extra)
        self._core.handle_message(message)
    def __on_end(self, _):
        """
        XMPP session ended
        """
        # Clean up our access
        self._directory.get_local_peer().unset_access(self._access_id)
        # Shut down the service
        self._controller = False
    def __room_in(self, data):
        """
        Someone entered the main room

        :param data: MUC presence stanza
        """
        uid = data['from'].resource
        room_jid = data['from'].bare
        local_peer = self._directory.get_local_peer()
        if uid == local_peer.uid and room_jid == self._room:
            # We're on line, in the main room, register our service
            self._controller = True
            # Register our local access
            local_peer.set_access(self._access_id,
                                  XMPPAccess(self._bot.boundjid.full))
            # Send the "new comer" message
            message = beans.Message('herald/directory/newcomer',
                                    local_peer.dump())
            self.__send_message("groupchat", room_jid, message)
    def __room_out(self, data):
        """
        Someone exited the main room

        :param data: MUC presence stanza
        """
        uid = data['from'].resource
        room_jid = data['from'].bare
        if uid != self._directory.local_uid and room_jid == self._room:
            # Someone else is leaving the main room: clean up the directory
            try:
                peer = self._directory.get_peer(uid)
                # Fix: use the configured access ID (consistent with
                # __on_end / __room_in) instead of the module constant
                peer.unset_access(self._access_id)
            except KeyError:
                pass
    def __send_message(self, msgtype, target, message, parent_uid=None):
        """
        Prepares and sends a message over XMPP

        :param msgtype: Kind of message (chat or groupchat)
        :param target: Target JID or MUC room
        :param message: Herald message bean
        :param parent_uid: UID of the message this one replies to (optional)
        """
        # Convert content to JSON
        content = json.dumps(jabsorb.to_jabsorb(message.content),
                             default=utils.json_converter)
        # Prepare an XMPP message, based on the Herald message
        xmpp_msg = self._bot.make_message(mto=target,
                                          mbody=content,
                                          msubject=message.subject,
                                          mtype=msgtype)
        xmpp_msg['thread'] = message.uid
        if parent_uid:
            xmpp_msg['parent_thread'] = parent_uid
        # Send it
        xmpp_msg.send()
    def __get_jid(self, peer, extra):
        """
        Retrieves the JID to use to communicate with a peer

        :param peer: A Peer bean or None
        :param extra: The extra information for a reply or None
        :return: The JID to use to reply, or None
        """
        # Get JID from reply information
        jid = None
        if extra is not None:
            jid = extra.get('sender_jid')
        # Try to read information from the peer
        if not jid and peer is not None:
            try:
                # Get the target JID
                jid = peer.get_access(self._access_id).jid
            except (KeyError, AttributeError):
                pass
        return jid
    def fire(self, peer, message, extra=None):
        """
        Fires a message to a peer

        :param peer: A Peer bean
        :param message: Message to send
        :param extra: Extra information used in case of a reply
        :raise InvalidPeerAccess: no XMPP access found for the peer
        """
        # Get the request message UID, if any
        parent_uid = None
        if extra is not None:
            parent_uid = extra.get('parent_uid')
        # Try to read extra information
        jid = self.__get_jid(peer, extra)
        if jid:
            # Send the XMPP message
            self.__send_message("chat", jid, message, parent_uid)
        else:
            # No XMPP access description
            raise InvalidPeerAccess(beans.Target(uid=peer.uid),
                                    "No '{0}' access found"
                                    .format(self._access_id))
    def fire_group(self, group, peers, message):
        """
        Fires a message to a group of peers

        :param group: Name of a group
        :param peers: Peers to communicate with
        :param message: Message to send
        :return: The list of reached peers
        """
        # Special case for the main room
        if group == 'all':
            group_jid = self._room
        else:
            # Get the group JID
            group_jid = sleekxmpp.JID(local=group, domain=self._muc_domain)
        # Send the XMPP message
        self.__send_message("groupchat", group_jid, message)
        return peers
| 31.431755 | 80 | 0.588444 |
__version_info__ = (0, 0, 2)
__version__ = ".".join(str(x) for x in __version_info__)
__docformat__ = "restructuredtext en"
from . import FACTORY_TRANSPORT, SERVICE_XMPP_DIRECTORY, ACCESS_ID, \
PROP_XMPP_SERVER, PROP_XMPP_PORT, PROP_MONITOR_JID, PROP_MONITOR_KEY, \
PROP_XMPP_ROOM_JID
from .beans import XMPPAccess
from .bot import HeraldBot
from herald.exceptions import InvalidPeerAccess
import herald
import herald.beans as beans
import herald.utils as utils
import sleekxmpp
from pelix.ipopo.decorators import ComponentFactory, Requires, Provides, \
Property, Validate, Invalidate
import pelix.misc.jabsorb as jabsorb
import json
import logging
_logger = logging.getLogger(__name__)
@ComponentFactory(FACTORY_TRANSPORT)
@Requires('_core', herald.SERVICE_HERALD_INTERNAL)
@Requires('_directory', herald.SERVICE_DIRECTORY)
@Requires('_xmpp_directory', SERVICE_XMPP_DIRECTORY)
@Provides(herald.SERVICE_TRANSPORT, '_controller')
@Property('_access_id', herald.PROP_ACCESS_ID, ACCESS_ID)
@Property('_host', PROP_XMPP_SERVER, 'localhost')
@Property('_port', PROP_XMPP_PORT, 5222)
@Property('_monitor_jid', PROP_MONITOR_JID)
@Property('_key', PROP_MONITOR_KEY)
@Property('_room', PROP_XMPP_ROOM_JID)
class XmppTransport(object):
def __init__(self):
self._core = None
self._directory = None
self._xmpp_directory = None
self._controller = False
self._access_id = ACCESS_ID
self._host = "localhost"
self._port = 5222
self._monitor_jid = None
self._key = None
self._room = None
self._muc_domain = None
self._bot = HeraldBot()
@Validate
def _validate(self, _):
self._controller = False
self._muc_domain = sleekxmpp.JID(self._room).domain
self._bot.add_event_handler("session_start", self.__on_start)
self._bot.add_event_handler("session_end", self.__on_end)
self._bot.add_event_handler("muc::{0}::got_online".format(self._room),
self.__room_in)
self._bot.add_event_handler("muc::{0}::got_offline".format(self._room),
self.__room_out)
self._bot.register_plugin("xep_0203")
self._bot.set_message_callback(self.__on_message)
self._bot.connect(self._host, self._port)
@Invalidate
def _invalidate(self, _):
self._bot.disconnect()
self._bot.set_message_callback(None)
self._bot.del_event_handler("session_start", self.__on_start)
self._bot.del_event_handler("session_end", self.__on_end)
def __on_start(self, _):
_logger.info("Bot connected with JID: %s", self._bot.boundjid.bare)
peer = self._directory.get_local_peer()
_logger.info("Requesting to join %s", self._monitor_jid)
self._bot.herald_join(peer.uid, self._monitor_jid, self._key,
peer.groups)
def __on_message(self, msg):
subject = msg['subject']
if not subject:
return
if msg['delay']['stamp'] is not None:
return
muc_message = (msg['type'] == 'groupchat') \
or (msg['from'].domain == self._muc_domain)
sender_jid = msg['from'].full
try:
if muc_message:
sender_uid = msg['from'].resource
else:
sender_uid = self._xmpp_directory.from_jid(sender_jid)
except KeyError:
sender_uid = "<unknown>"
try:
content = jabsorb.from_jabsorb(json.loads(msg['body']))
except ValueError:
content = msg['body']
uid = msg['thread']
reply_to = msg['parent_thread']
# Extra parameters, for a reply
extra = {"parent_uid": uid,
"sender_jid": sender_jid}
# Call back the core service
message = beans.MessageReceived(uid, subject, content, sender_uid,
reply_to, self._access_id, extra=extra)
self._core.handle_message(message)
def __on_end(self, _):
# Clean up our access
self._directory.get_local_peer().unset_access(self._access_id)
# Shut down the service
self._controller = False
def __room_in(self, data):
uid = data['from'].resource
room_jid = data['from'].bare
local_peer = self._directory.get_local_peer()
if uid == local_peer.uid and room_jid == self._room:
# We're on line, in the main room, register our service
self._controller = True
local_peer.set_access(self._access_id,
XMPPAccess(self._bot.boundjid.full))
message = beans.Message('herald/directory/newcomer',
local_peer.dump())
self.__send_message("groupchat", room_jid, message)
def __room_out(self, data):
uid = data['from'].resource
room_jid = data['from'].bare
if uid != self._directory.local_uid and room_jid == self._room:
try:
peer = self._directory.get_peer(uid)
peer.unset_access(ACCESS_ID)
except KeyError:
pass
def __send_message(self, msgtype, target, message, parent_uid=None):
content = json.dumps(jabsorb.to_jabsorb(message.content),
default=utils.json_converter)
xmpp_msg = self._bot.make_message(mto=target,
mbody=content,
msubject=message.subject,
mtype=msgtype)
xmpp_msg['thread'] = message.uid
if parent_uid:
xmpp_msg['parent_thread'] = parent_uid
xmpp_msg.send()
def __get_jid(self, peer, extra):
jid = None
if extra is not None:
jid = extra.get('sender_jid')
if not jid and peer is not None:
try:
jid = peer.get_access(self._access_id).jid
except (KeyError, AttributeError):
pass
return jid
def fire(self, peer, message, extra=None):
parent_uid = None
if extra is not None:
parent_uid = extra.get('parent_uid')
jid = self.__get_jid(peer, extra)
if jid:
self.__send_message("chat", jid, message, parent_uid)
else:
raise InvalidPeerAccess(beans.Target(uid=peer.uid),
"No '{0}' access found"
.format(self._access_id))
def fire_group(self, group, peers, message):
if group == 'all':
group_jid = self._room
else:
group_jid = sleekxmpp.JID(local=group, domain=self._muc_domain)
self.__send_message("groupchat", group_jid, message)
return peers
| true | true |
1c3cd2713983288e908beb1ab1b81ac543055392 | 469 | py | Python | test/test_event.py | pygitee/pygitee | 7622314a4dbb08cf2f729b6cdd0a2887b96e394e | [
"MIT"
] | null | null | null | test/test_event.py | pygitee/pygitee | 7622314a4dbb08cf2f729b6cdd0a2887b96e394e | [
"MIT"
] | null | null | null | test/test_event.py | pygitee/pygitee | 7622314a4dbb08cf2f729b6cdd0a2887b96e394e | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
import unittest
class TestEvent(unittest.TestCase):
"""Event unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testEvent(self):
"""Test Event"""
# FIXME: construct object with mandatory attributes with example values
# model = gitee.models.event.Event() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 17.37037 | 79 | 0.628998 |
from __future__ import absolute_import
import unittest
class TestEvent(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testEvent(self):
s
if __name__ == '__main__':
unittest.main()
| true | true |
1c3cd3173b91a491b157b133a03b2d2ecc200075 | 59 | py | Python | src/datetime_matcher/__init__.py | stephen-zhao/datetime_matcher | 86db60d3a0158e46660a6e957db595d38e23c664 | [
"MIT"
] | 6 | 2020-10-11T07:31:42.000Z | 2022-01-09T08:53:51.000Z | src/datetime_matcher/__init__.py | stephen-zhao/datetime_matcher | 86db60d3a0158e46660a6e957db595d38e23c664 | [
"MIT"
] | null | null | null | src/datetime_matcher/__init__.py | stephen-zhao/datetime_matcher | 86db60d3a0158e46660a6e957db595d38e23c664 | [
"MIT"
] | null | null | null | from .datetime_matcher import DatetimeMatcher, DfregexToken | 59 | 59 | 0.898305 | from .datetime_matcher import DatetimeMatcher, DfregexToken | true | true |
1c3cd3519eef1fb24e3598c4261610d05a517f83 | 341 | py | Python | Idat_Python2022/Semana_4/practica_web/numeroentero.py | Kennethguerra3/Python_Ejercicio_2022 | cf1297cf1e1585eba699e32c02993818c3d9ecbf | [
"MIT"
] | null | null | null | Idat_Python2022/Semana_4/practica_web/numeroentero.py | Kennethguerra3/Python_Ejercicio_2022 | cf1297cf1e1585eba699e32c02993818c3d9ecbf | [
"MIT"
] | null | null | null | Idat_Python2022/Semana_4/practica_web/numeroentero.py | Kennethguerra3/Python_Ejercicio_2022 | cf1297cf1e1585eba699e32c02993818c3d9ecbf | [
"MIT"
] | null | null | null | #Escribir un programa que pida al usuario dos números y devuelva su división. Si el usuario no introduce números
# debe devolver un aviso de error y si el divisor es cero también.
n = int(input("Introduce un número entero: "))
if n % 2 == 0:
print("El número " + str(n) + " es par")
else:
print("El número " + str(n) + " es impar") | 42.625 | 113 | 0.674487 |
n = int(input("Introduce un número entero: "))
if n % 2 == 0:
print("El número " + str(n) + " es par")
else:
print("El número " + str(n) + " es impar") | true | true |
1c3cd380c4691e84faf50155fddf4ada75c9b738 | 1,492 | py | Python | gazeclassify/tests/unit/test_DistanceToMask.py | Flow000/gazeclassify | dda4c8cd62ad84615f4272171f1635ab683f9bed | [
"MIT"
] | 6 | 2021-02-25T01:17:09.000Z | 2022-03-19T07:13:52.000Z | gazeclassify/tests/unit/test_DistanceToMask.py | Flow000/gazeclassify | dda4c8cd62ad84615f4272171f1635ab683f9bed | [
"MIT"
] | 3 | 2021-05-10T07:38:24.000Z | 2021-06-07T12:59:29.000Z | gazeclassify/tests/unit/test_DistanceToMask.py | Flow000/gazeclassify | dda4c8cd62ad84615f4272171f1635ab683f9bed | [
"MIT"
] | 1 | 2021-06-24T12:58:01.000Z | 2021-06-24T12:58:01.000Z | import numpy as np # type: ignore
from gazeclassify.service.gaze_distance import DistanceToShape
class Test_Measuring2DDistanceGazeTo_Shape:
def test_read_binary_image_mask_and_calculate_distance_gaze_to_closest_pixel(self) -> None:
image_mask = np.zeros((3, 3))
image_mask[0, 0] = 1
image_mask[0, 1] = 1
gaze_x = 2
gaze_y = 0
pixel_distance = DistanceToShape(image_mask)
pixel_distance.detect_shape(positive_values=1)
distance = pixel_distance.distance_2d(gaze_x, gaze_y)
assert distance == 2
def test_read_boolean_2D_mask_and_identify_distance_to_gaze_should_return_sqrt2_when_diagnoally(self) -> None:
image_mask = np.array(
[
[1, 0],
[0, 0]
]
)
gaze_x = 1
gaze_y = 1
pixel_distance = DistanceToShape(image_mask)
pixel_distance.detect_shape(positive_values=1)
distance = pixel_distance.distance_2d(gaze_x, gaze_y)
assert distance == np.sqrt(2)
def test_read_boolean_2D_mask_if_no_shape_detected_return_None(self) -> None:
image_mask = np.array(
[
[0, 0],
[0, 0]
]
)
gaze_x = 1
gaze_y = 1
pixel_distance = DistanceToShape(image_mask)
pixel_distance.detect_shape(positive_values=1)
distance = pixel_distance.distance_2d(gaze_x, gaze_y)
assert distance == None | 32.434783 | 114 | 0.628686 | import numpy as np
from gazeclassify.service.gaze_distance import DistanceToShape
class Test_Measuring2DDistanceGazeTo_Shape:
def test_read_binary_image_mask_and_calculate_distance_gaze_to_closest_pixel(self) -> None:
image_mask = np.zeros((3, 3))
image_mask[0, 0] = 1
image_mask[0, 1] = 1
gaze_x = 2
gaze_y = 0
pixel_distance = DistanceToShape(image_mask)
pixel_distance.detect_shape(positive_values=1)
distance = pixel_distance.distance_2d(gaze_x, gaze_y)
assert distance == 2
def test_read_boolean_2D_mask_and_identify_distance_to_gaze_should_return_sqrt2_when_diagnoally(self) -> None:
image_mask = np.array(
[
[1, 0],
[0, 0]
]
)
gaze_x = 1
gaze_y = 1
pixel_distance = DistanceToShape(image_mask)
pixel_distance.detect_shape(positive_values=1)
distance = pixel_distance.distance_2d(gaze_x, gaze_y)
assert distance == np.sqrt(2)
def test_read_boolean_2D_mask_if_no_shape_detected_return_None(self) -> None:
image_mask = np.array(
[
[0, 0],
[0, 0]
]
)
gaze_x = 1
gaze_y = 1
pixel_distance = DistanceToShape(image_mask)
pixel_distance.detect_shape(positive_values=1)
distance = pixel_distance.distance_2d(gaze_x, gaze_y)
assert distance == None | true | true |
1c3cd52dbd37e56aea06d7d7692db8e6b0ba1f89 | 688 | py | Python | zh/conf.py | NewBLife/docs | 48ecb8ef234fd2f97537d36a76135e4b936b0c0a | [
"MIT"
] | null | null | null | zh/conf.py | NewBLife/docs | 48ecb8ef234fd2f97537d36a76135e4b936b0c0a | [
"MIT"
] | null | null | null | zh/conf.py | NewBLife/docs | 48ecb8ef234fd2f97537d36a76135e4b936b0c0a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# CakePHP Cookbook documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 18 12:54:14 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Append the top level directory of the docs, so we can import from the config dir.
sys.path.insert(0, os.path.abspath('..'))
# Pull in all the configuration options defined in the global config file..
from config.all import *
language = 'zh'
| 29.913043 | 83 | 0.74564 |
import sys, os
sys.path.insert(0, os.path.abspath('..'))
from config.all import *
language = 'zh'
| true | true |
1c3cd6049915e6397d5c077ef6c8c7d14f3f4ae5 | 690 | py | Python | toggle-testing.py | ratanawang/Dentaku | 67f39a2d297b0bbd6b468ea6f2dd7a65683a0f5a | [
"MIT"
] | 3 | 2020-08-14T17:48:10.000Z | 2020-08-14T17:50:38.000Z | toggle-testing.py | ratanawang/Dentaku | 67f39a2d297b0bbd6b468ea6f2dd7a65683a0f5a | [
"MIT"
] | 282 | 2020-01-19T18:31:10.000Z | 2021-07-30T06:31:38.000Z | toggle-testing.py | ratanawang/Dentaku | 67f39a2d297b0bbd6b468ea6f2dd7a65683a0f5a | [
"MIT"
] | 11 | 2020-01-18T07:37:44.000Z | 2020-01-31T23:53:20.000Z | import json
with open("database.json", 'r') as outfile:
database = json.load(outfile)
if 'testing' in database:
if database['testing'] == 'y':
database['testing'] = 'n'
else:
database['testing'] = 'y'
print("Testing mode has been turned " + ('on' if database['testing'] == 'y' else 'off'))
else:
print("Testing mode will restrict all bot interactions to direct messages, or ThreadType.USER.")
database['testing'] = input("Turn on testing mode? (This decision will be saved). (y/n): ")
with open("database.json", 'w') as outfile:
json.dump(database, outfile)
print("Your decision has been saved. database['testing'] = " + database['testing'])
| 34.5 | 100 | 0.649275 | import json
with open("database.json", 'r') as outfile:
database = json.load(outfile)
if 'testing' in database:
if database['testing'] == 'y':
database['testing'] = 'n'
else:
database['testing'] = 'y'
print("Testing mode has been turned " + ('on' if database['testing'] == 'y' else 'off'))
else:
print("Testing mode will restrict all bot interactions to direct messages, or ThreadType.USER.")
database['testing'] = input("Turn on testing mode? (This decision will be saved). (y/n): ")
with open("database.json", 'w') as outfile:
json.dump(database, outfile)
print("Your decision has been saved. database['testing'] = " + database['testing'])
| true | true |
1c3cd6999e5c29f66b566ec746f14f5edce34d59 | 14,217 | py | Python | tools/train_net.py | jcjs/FPN-Pytorch | 423a4499c4e826d17367762e821b51b9b1b0f2f3 | [
"MIT"
] | 271 | 2018-11-23T02:13:19.000Z | 2021-05-08T08:17:52.000Z | tools/train_net.py | jcjs/FPN-Pytorch | 423a4499c4e826d17367762e821b51b9b1b0f2f3 | [
"MIT"
] | 8 | 2018-11-23T11:40:37.000Z | 2021-08-09T13:15:44.000Z | tools/train_net.py | jcjs/FPN-Pytorch | 423a4499c4e826d17367762e821b51b9b1b0f2f3 | [
"MIT"
] | 57 | 2018-11-23T07:00:09.000Z | 2021-12-19T03:49:35.000Z | """ Training Script """
import argparse
import distutils.util
import os
import sys
import pickle
import resource
import traceback
import logging
from collections import defaultdict
import numpy as np
import yaml
import torch
from torch.autograd import Variable
import torch.nn as nn
import cv2
cv2.setNumThreads(0) # pytorch issue 1355: possible deadlock in dataloader
import _init_paths # pylint: disable=unused-import
import nn as mynn
import utils.net as net_utils
import utils.misc as misc_utils
from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg
from datasets.roidb import combined_roidb_for_training
from modeling.model_builder import Generalized_RCNN
from roi_data.loader import RoiDataLoader, MinibatchSampler, collate_minibatch
from utils.detectron_weight_helper import load_detectron_weight
from utils.logging import log_stats
from utils.timer import Timer
from utils.training_stats import TrainingStats
# OpenCL may be enabled by default in OpenCV3; disable it because it's not
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# RuntimeError: received 0 items of ancdata. Issue: pytorch/pytorch#973
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
def parse_args():
"""Parse input arguments"""
parser = argparse.ArgumentParser(description='Train a X-RCNN network')
parser.add_argument(
'--dataset', dest='dataset', required=True,
help='Dataset to use')
parser.add_argument(
'--cfg', dest='cfg_file', required=True,
help='Config file for training (and optionally testing)')
parser.add_argument(
'--set', dest='set_cfgs',
help='Set config keys. Key value sequence seperate by whitespace.'
'e.g. [key] [value] [key] [value]',
default=[], nargs='+')
parser.add_argument(
'--disp_interval',
help='Display training info every N iterations',
default=100, type=int)
parser.add_argument(
'--no_cuda', dest='cuda', help='Do not use CUDA device', action='store_false')
# Optimization
# These options has the highest prioity and can overwrite the values in config file
# or values set by set_cfgs. `None` means do not overwrite.
parser.add_argument(
'--bs', dest='batch_size',
help='Explicitly specify to overwrite the value comed from cfg_file.',
type=int)
parser.add_argument(
'--nw', dest='num_workers',
help='Explicitly specify to overwrite number of workers to load data. Defaults to 4',
type=int)
parser.add_argument(
'--o', dest='optimizer', help='Training optimizer.',
default=None)
parser.add_argument(
'--lr', help='Base learning rate.',
default=None, type=float)
parser.add_argument(
'--lr_decay_gamma',
help='Learning rate decay rate.',
default=None, type=float)
parser.add_argument(
'--lr_decay_epochs',
help='Epochs to decay the learning rate on. '
'Decay happens on the beginning of a epoch. '
'Epoch is 0-indexed.',
default=[4, 5], nargs='+', type=int)
# Epoch
parser.add_argument(
'--start_iter',
help='Starting iteration for first training epoch. 0-indexed.',
default=0, type=int)
parser.add_argument(
'--start_epoch',
help='Starting epoch count. Epoch is 0-indexed.',
default=0, type=int)
parser.add_argument(
'--epochs', dest='num_epochs',
help='Number of epochs to train',
default=6, type=int)
# Resume training: requires same iterations per epoch
parser.add_argument(
'--resume',
help='resume to training on a checkpoint',
action='store_true')
parser.add_argument(
'--no_save', help='do not save anything', action='store_true')
parser.add_argument(
'--ckpt_num_per_epoch',
help='number of checkpoints to save in each epoch. '
'Not include the one at the end of an epoch.',
default=3, type=int)
parser.add_argument(
'--load_ckpt', help='checkpoint path to load')
parser.add_argument(
'--load_detectron', help='path to the detectron weight pickle file')
parser.add_argument(
'--use_tfboard', help='Use tensorflow tensorboard to log training info',
action='store_true')
return parser.parse_args()
def main():
"""Main function"""
args = parse_args()
print('Called with args:')
print(args)
if not torch.cuda.is_available():
sys.exit("Need a CUDA device to run the code.")
if args.cuda or cfg.NUM_GPUS > 0:
cfg.CUDA = True
else:
raise ValueError("Need Cuda device to run !")
if args.dataset == "coco2017":
cfg.TRAIN.DATASETS = ('coco_2017_train',)
cfg.MODEL.NUM_CLASSES = 81
elif args.dataset == "keypoints_coco2017":
cfg.TRAIN.DATASETS = ('keypoints_coco_2017_train',)
cfg.MODEL.NUM_CLASSES = 2
else:
raise ValueError("Unexpected args.dataset: {}".format(args.dataset))
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
### Adaptively adjust some configs ###
original_batch_size = cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH
if args.batch_size is None:
args.batch_size = original_batch_size
cfg.NUM_GPUS = torch.cuda.device_count()
assert (args.batch_size % cfg.NUM_GPUS) == 0, \
'batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS)
cfg.TRAIN.IMS_PER_BATCH = args.batch_size // cfg.NUM_GPUS
print('Batch size change from {} (in config file) to {}'.format(
original_batch_size, args.batch_size))
print('NUM_GPUs: %d, TRAIN.IMS_PER_BATCH: %d' % (cfg.NUM_GPUS, cfg.TRAIN.IMS_PER_BATCH))
if args.num_workers is not None:
cfg.DATA_LOADER.NUM_THREADS = args.num_workers
print('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS)
### Adjust learning based on batch size change linearly
old_base_lr = cfg.SOLVER.BASE_LR
cfg.SOLVER.BASE_LR *= args.batch_size / original_batch_size
print('Adjust BASE_LR linearly according to batch size change: {} --> {}'.format(
old_base_lr, cfg.SOLVER.BASE_LR))
### Overwrite some solver settings from command line arguments
if args.optimizer is not None:
cfg.SOLVER.TYPE = args.optimizer
if args.lr is not None:
cfg.SOLVER.BASE_LR = args.lr
if args.lr_decay_gamma is not None:
cfg.SOLVER.GAMMA = args.lr_decay_gamma
timers = defaultdict(Timer)
### Dataset ###
timers['roidb'].tic()
roidb, ratio_list, ratio_index = combined_roidb_for_training(
cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
timers['roidb'].toc()
train_size = len(roidb)
logger.info('{:d} roidb entries'.format(train_size))
logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)
sampler = MinibatchSampler(ratio_list, ratio_index)
dataset = RoiDataLoader(
roidb,
cfg.MODEL.NUM_CLASSES,
training=True)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
sampler=sampler,
num_workers=cfg.DATA_LOADER.NUM_THREADS,
collate_fn=collate_minibatch)
assert_and_infer_cfg()
### Model ###
maskRCNN = Generalized_RCNN()
if cfg.CUDA:
maskRCNN.cuda()
### Optimizer ###
bias_params = []
nonbias_params = []
for key, value in dict(maskRCNN.named_parameters()).items():
if value.requires_grad:
if 'bias' in key:
bias_params.append(value)
else:
nonbias_params.append(value)
params = [
{'params': nonbias_params,
'lr': cfg.SOLVER.BASE_LR,
'weight_decay': cfg.SOLVER.WEIGHT_DECAY},
{'params': bias_params,
'lr': cfg.SOLVER.BASE_LR * (cfg.SOLVER.BIAS_DOUBLE_LR + 1),
'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0}
]
if cfg.SOLVER.TYPE == "SGD":
optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)
elif cfg.SOLVER.TYPE == "Adam":
optimizer = torch.optim.Adam(params)
### Load checkpoint
if args.load_ckpt:
load_name = args.load_ckpt
logging.info("loading checkpoint %s", load_name)
checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
net_utils.load_ckpt(maskRCNN, checkpoint['model'])
if args.resume:
assert checkpoint['iters_per_epoch'] == train_size // args.batch_size, \
"iters_per_epoch should match for resume"
# There is a bug in optimizer.load_state_dict on Pytorch 0.3.1.
# However it's fixed on master.
# optimizer.load_state_dict(checkpoint['optimizer'])
misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer'])
if checkpoint['step'] == (checkpoint['iters_per_epoch'] - 1):
# Resume from end of an epoch
args.start_epoch = checkpoint['epoch'] + 1
args.start_iter = 0
else:
# Resume from the middle of an epoch.
# NOTE: dataloader is not synced with previous state
args.start_epoch = checkpoint['epoch']
args.start_iter = checkpoint['step'] + 1
del checkpoint
torch.cuda.empty_cache()
if args.load_detectron: #TODO resume for detectron weights (load sgd momentum values)
logging.info("loading Detectron weights %s", args.load_detectron)
load_detectron_weight(maskRCNN, args.load_detectron)
lr = optimizer.param_groups[0]['lr'] # lr of non-bias parameters, for commmand line outputs.
maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'],
minibatch=True)
### Training Setups ###
args.run_name = misc_utils.get_run_name()
output_dir = misc_utils.get_output_dir(args, args.run_name)
args.cfg_filename = os.path.basename(args.cfg_file)
if not args.no_save:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
blob = {'cfg': yaml.dump(cfg), 'args': args}
with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)
if args.use_tfboard:
from tensorboardX import SummaryWriter
# Set the Tensorboard logger
tblogger = SummaryWriter(output_dir)
### Training Loop ###
maskRCNN.train()
training_stats = TrainingStats(
args,
args.disp_interval,
tblogger if args.use_tfboard and not args.no_save else None)
iters_per_epoch = int(train_size / args.batch_size) # drop last
args.iters_per_epoch = iters_per_epoch
ckpt_interval_per_epoch = iters_per_epoch // args.ckpt_num_per_epoch
try:
logger.info('Training starts !')
args.step = args.start_iter
global_step = iters_per_epoch * args.start_epoch + args.step
for args.epoch in range(args.start_epoch, args.start_epoch + args.num_epochs):
# ---- Start of epoch ----
# adjust learning rate
if args.lr_decay_epochs and args.epoch == args.lr_decay_epochs[0] and args.start_iter == 0:
args.lr_decay_epochs.pop(0)
net_utils.decay_learning_rate(optimizer, lr, cfg.SOLVER.GAMMA)
lr *= cfg.SOLVER.GAMMA
for args.step, input_data in zip(range(args.start_iter, iters_per_epoch), dataloader):
for key in input_data:
if key != 'roidb': # roidb is a list of ndarrays with inconsistent length
input_data[key] = list(map(Variable, input_data[key]))
training_stats.IterTic()
net_outputs = maskRCNN(**input_data)
training_stats.UpdateIterStats(net_outputs)
loss = net_outputs['total_loss']
optimizer.zero_grad()
loss.backward()
optimizer.step()
training_stats.IterToc()
if (args.step+1) % ckpt_interval_per_epoch == 0:
net_utils.save_ckpt(output_dir, args, maskRCNN, optimizer)
if args.step % args.disp_interval == 0:
log_training_stats(training_stats, global_step, lr)
global_step += 1
# ---- End of epoch ----
# save checkpoint
net_utils.save_ckpt(output_dir, args, maskRCNN, optimizer)
# reset starting iter number after first epoch
args.start_iter = 0
# ---- Training ends ----
if iters_per_epoch % args.disp_interval != 0:
# log last stats at the end
log_training_stats(training_stats, global_step, lr)
except (RuntimeError, KeyboardInterrupt):
logger.info('Save ckpt on exception ...')
net_utils.save_ckpt(output_dir, args, maskRCNN, optimizer)
logger.info('Save ckpt done.')
stack_trace = traceback.format_exc()
print(stack_trace)
finally:
if args.use_tfboard and not args.no_save:
tblogger.close()
def log_training_stats(training_stats, global_step, lr):
stats = training_stats.GetStats(global_step, lr)
log_stats(stats, training_stats.misc_args)
if training_stats.tblogger:
training_stats.tb_log_stats(stats, global_step)
if __name__ == '__main__':
main()
| 37.217277 | 104 | 0.630091 |
import argparse
import distutils.util
import os
import sys
import pickle
import resource
import traceback
import logging
from collections import defaultdict
import numpy as np
import yaml
import torch
from torch.autograd import Variable
import torch.nn as nn
import cv2
cv2.setNumThreads(0)
import _init_paths
import nn as mynn
import utils.net as net_utils
import utils.misc as misc_utils
from core.config import cfg, cfg_from_file, cfg_from_list, assert_and_infer_cfg
from datasets.roidb import combined_roidb_for_training
from modeling.model_builder import Generalized_RCNN
from roi_data.loader import RoiDataLoader, MinibatchSampler, collate_minibatch
from utils.detectron_weight_helper import load_detectron_weight
from utils.logging import log_stats
from utils.timer import Timer
from utils.training_stats import TrainingStats
# thread safe and causes unwanted GPU memory allocations.
cv2.ocl.setUseOpenCL(False)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# RuntimeError: received 0 items of ancdata. Issue: pytorch/pytorch#973
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
def parse_args():
parser = argparse.ArgumentParser(description='Train a X-RCNN network')
parser.add_argument(
'--dataset', dest='dataset', required=True,
help='Dataset to use')
parser.add_argument(
'--cfg', dest='cfg_file', required=True,
help='Config file for training (and optionally testing)')
parser.add_argument(
'--set', dest='set_cfgs',
help='Set config keys. Key value sequence seperate by whitespace.'
'e.g. [key] [value] [key] [value]',
default=[], nargs='+')
parser.add_argument(
'--disp_interval',
help='Display training info every N iterations',
default=100, type=int)
parser.add_argument(
'--no_cuda', dest='cuda', help='Do not use CUDA device', action='store_false')
# Optimization
# These options has the highest prioity and can overwrite the values in config file
# or values set by set_cfgs. `None` means do not overwrite.
parser.add_argument(
'--bs', dest='batch_size',
help='Explicitly specify to overwrite the value comed from cfg_file.',
type=int)
parser.add_argument(
'--nw', dest='num_workers',
help='Explicitly specify to overwrite number of workers to load data. Defaults to 4',
type=int)
parser.add_argument(
'--o', dest='optimizer', help='Training optimizer.',
default=None)
parser.add_argument(
'--lr', help='Base learning rate.',
default=None, type=float)
parser.add_argument(
'--lr_decay_gamma',
help='Learning rate decay rate.',
default=None, type=float)
parser.add_argument(
'--lr_decay_epochs',
help='Epochs to decay the learning rate on. '
'Decay happens on the beginning of a epoch. '
'Epoch is 0-indexed.',
default=[4, 5], nargs='+', type=int)
# Epoch
parser.add_argument(
'--start_iter',
help='Starting iteration for first training epoch. 0-indexed.',
default=0, type=int)
parser.add_argument(
'--start_epoch',
help='Starting epoch count. Epoch is 0-indexed.',
default=0, type=int)
parser.add_argument(
'--epochs', dest='num_epochs',
help='Number of epochs to train',
default=6, type=int)
# Resume training: requires same iterations per epoch
parser.add_argument(
'--resume',
help='resume to training on a checkpoint',
action='store_true')
parser.add_argument(
'--no_save', help='do not save anything', action='store_true')
parser.add_argument(
'--ckpt_num_per_epoch',
help='number of checkpoints to save in each epoch. '
'Not include the one at the end of an epoch.',
default=3, type=int)
parser.add_argument(
'--load_ckpt', help='checkpoint path to load')
parser.add_argument(
'--load_detectron', help='path to the detectron weight pickle file')
parser.add_argument(
'--use_tfboard', help='Use tensorflow tensorboard to log training info',
action='store_true')
return parser.parse_args()
def main():
args = parse_args()
print('Called with args:')
print(args)
if not torch.cuda.is_available():
sys.exit("Need a CUDA device to run the code.")
if args.cuda or cfg.NUM_GPUS > 0:
cfg.CUDA = True
else:
raise ValueError("Need Cuda device to run !")
if args.dataset == "coco2017":
cfg.TRAIN.DATASETS = ('coco_2017_train',)
cfg.MODEL.NUM_CLASSES = 81
elif args.dataset == "keypoints_coco2017":
cfg.TRAIN.DATASETS = ('keypoints_coco_2017_train',)
cfg.MODEL.NUM_CLASSES = 2
else:
raise ValueError("Unexpected args.dataset: {}".format(args.dataset))
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
### Adaptively adjust some configs ###
original_batch_size = cfg.NUM_GPUS * cfg.TRAIN.IMS_PER_BATCH
if args.batch_size is None:
args.batch_size = original_batch_size
cfg.NUM_GPUS = torch.cuda.device_count()
assert (args.batch_size % cfg.NUM_GPUS) == 0, \
'batch_size: %d, NUM_GPUS: %d' % (args.batch_size, cfg.NUM_GPUS)
cfg.TRAIN.IMS_PER_BATCH = args.batch_size // cfg.NUM_GPUS
print('Batch size change from {} (in config file) to {}'.format(
original_batch_size, args.batch_size))
print('NUM_GPUs: %d, TRAIN.IMS_PER_BATCH: %d' % (cfg.NUM_GPUS, cfg.TRAIN.IMS_PER_BATCH))
if args.num_workers is not None:
cfg.DATA_LOADER.NUM_THREADS = args.num_workers
print('Number of data loading threads: %d' % cfg.DATA_LOADER.NUM_THREADS)
### Adjust learning based on batch size change linearly
old_base_lr = cfg.SOLVER.BASE_LR
cfg.SOLVER.BASE_LR *= args.batch_size / original_batch_size
print('Adjust BASE_LR linearly according to batch size change: {} --> {}'.format(
old_base_lr, cfg.SOLVER.BASE_LR))
### Overwrite some solver settings from command line arguments
if args.optimizer is not None:
cfg.SOLVER.TYPE = args.optimizer
if args.lr is not None:
cfg.SOLVER.BASE_LR = args.lr
if args.lr_decay_gamma is not None:
cfg.SOLVER.GAMMA = args.lr_decay_gamma
timers = defaultdict(Timer)
### Dataset ###
timers['roidb'].tic()
roidb, ratio_list, ratio_index = combined_roidb_for_training(
cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES)
timers['roidb'].toc()
train_size = len(roidb)
logger.info('{:d} roidb entries'.format(train_size))
logger.info('Takes %.2f sec(s) to construct roidb', timers['roidb'].average_time)
sampler = MinibatchSampler(ratio_list, ratio_index)
dataset = RoiDataLoader(
roidb,
cfg.MODEL.NUM_CLASSES,
training=True)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
sampler=sampler,
num_workers=cfg.DATA_LOADER.NUM_THREADS,
collate_fn=collate_minibatch)
assert_and_infer_cfg()
### Model ###
maskRCNN = Generalized_RCNN()
if cfg.CUDA:
maskRCNN.cuda()
### Optimizer ###
bias_params = []
nonbias_params = []
for key, value in dict(maskRCNN.named_parameters()).items():
if value.requires_grad:
if 'bias' in key:
bias_params.append(value)
else:
nonbias_params.append(value)
params = [
{'params': nonbias_params,
'lr': cfg.SOLVER.BASE_LR,
'weight_decay': cfg.SOLVER.WEIGHT_DECAY},
{'params': bias_params,
'lr': cfg.SOLVER.BASE_LR * (cfg.SOLVER.BIAS_DOUBLE_LR + 1),
'weight_decay': cfg.SOLVER.WEIGHT_DECAY if cfg.SOLVER.BIAS_WEIGHT_DECAY else 0}
]
if cfg.SOLVER.TYPE == "SGD":
optimizer = torch.optim.SGD(params, momentum=cfg.SOLVER.MOMENTUM)
elif cfg.SOLVER.TYPE == "Adam":
optimizer = torch.optim.Adam(params)
### Load checkpoint
if args.load_ckpt:
load_name = args.load_ckpt
logging.info("loading checkpoint %s", load_name)
checkpoint = torch.load(load_name, map_location=lambda storage, loc: storage)
net_utils.load_ckpt(maskRCNN, checkpoint['model'])
if args.resume:
assert checkpoint['iters_per_epoch'] == train_size // args.batch_size, \
"iters_per_epoch should match for resume"
# There is a bug in optimizer.load_state_dict on Pytorch 0.3.1.
# However it's fixed on master.
misc_utils.load_optimizer_state_dict(optimizer, checkpoint['optimizer'])
if checkpoint['step'] == (checkpoint['iters_per_epoch'] - 1):
args.start_epoch = checkpoint['epoch'] + 1
args.start_iter = 0
else:
args.start_epoch = checkpoint['epoch']
args.start_iter = checkpoint['step'] + 1
del checkpoint
torch.cuda.empty_cache()
if args.load_detectron:
logging.info("loading Detectron weights %s", args.load_detectron)
load_detectron_weight(maskRCNN, args.load_detectron)
lr = optimizer.param_groups[0]['lr']
maskRCNN = mynn.DataParallel(maskRCNN, cpu_keywords=['im_info', 'roidb'],
minibatch=True)
tput_dir = misc_utils.get_output_dir(args, args.run_name)
args.cfg_filename = os.path.basename(args.cfg_file)
if not args.no_save:
if not os.path.exists(output_dir):
os.makedirs(output_dir)
blob = {'cfg': yaml.dump(cfg), 'args': args}
with open(os.path.join(output_dir, 'config_and_args.pkl'), 'wb') as f:
pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)
if args.use_tfboard:
from tensorboardX import SummaryWriter
tblogger = SummaryWriter(output_dir)
ningStats(
args,
args.disp_interval,
tblogger if args.use_tfboard and not args.no_save else None)
iters_per_epoch = int(train_size / args.batch_size)
args.iters_per_epoch = iters_per_epoch
ckpt_interval_per_epoch = iters_per_epoch // args.ckpt_num_per_epoch
try:
logger.info('Training starts !')
args.step = args.start_iter
global_step = iters_per_epoch * args.start_epoch + args.step
for args.epoch in range(args.start_epoch, args.start_epoch + args.num_epochs):
if args.lr_decay_epochs and args.epoch == args.lr_decay_epochs[0] and args.start_iter == 0:
args.lr_decay_epochs.pop(0)
net_utils.decay_learning_rate(optimizer, lr, cfg.SOLVER.GAMMA)
lr *= cfg.SOLVER.GAMMA
for args.step, input_data in zip(range(args.start_iter, iters_per_epoch), dataloader):
for key in input_data:
if key != 'roidb':
input_data[key] = list(map(Variable, input_data[key]))
training_stats.IterTic()
net_outputs = maskRCNN(**input_data)
training_stats.UpdateIterStats(net_outputs)
loss = net_outputs['total_loss']
optimizer.zero_grad()
loss.backward()
optimizer.step()
training_stats.IterToc()
if (args.step+1) % ckpt_interval_per_epoch == 0:
net_utils.save_ckpt(output_dir, args, maskRCNN, optimizer)
if args.step % args.disp_interval == 0:
log_training_stats(training_stats, global_step, lr)
global_step += 1
net_utils.save_ckpt(output_dir, args, maskRCNN, optimizer)
args.start_iter = 0
if iters_per_epoch % args.disp_interval != 0:
log_training_stats(training_stats, global_step, lr)
except (RuntimeError, KeyboardInterrupt):
logger.info('Save ckpt on exception ...')
net_utils.save_ckpt(output_dir, args, maskRCNN, optimizer)
logger.info('Save ckpt done.')
stack_trace = traceback.format_exc()
print(stack_trace)
finally:
if args.use_tfboard and not args.no_save:
tblogger.close()
def log_training_stats(training_stats, global_step, lr):
stats = training_stats.GetStats(global_step, lr)
log_stats(stats, training_stats.misc_args)
if training_stats.tblogger:
training_stats.tb_log_stats(stats, global_step)
if __name__ == '__main__':
main()
| true | true |
1c3cd8bbbc3e95c30a6c446b891b099e171450d6 | 5,420 | py | Python | myparser.py | zejiangp/BlockConv | 7034f70a74ec69b2d49dcddce9ecbea7e544ddd7 | [
"MIT"
] | 11 | 2022-01-10T06:40:17.000Z | 2022-02-16T06:03:17.000Z | myparser.py | zejiangp/BlockConv | 7034f70a74ec69b2d49dcddce9ecbea7e544ddd7 | [
"MIT"
] | null | null | null | myparser.py | zejiangp/BlockConv | 7034f70a74ec69b2d49dcddce9ecbea7e544ddd7 | [
"MIT"
] | 1 | 2021-12-16T10:55:43.000Z | 2021-12-16T10:55:43.000Z | import argparse
import operator
def get_parser():
    """Build the command-line argument parser for training/evaluation runs.

    Returns:
        argparse.ArgumentParser: parser with dataset, hardware, optimizer
        and checkpoint-resume options. Call ``.parse_args()`` on the result.
    """
    parser = argparse.ArgumentParser(description="Simple code for train and test on ImageNet and Cifar")
    # --- Dataset / run basics ---
    parser.add_argument('data', metavar='DIR', help='path to dataset')
    parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18', help='model architecture'+'(default: resnet18)')
    parser.add_argument('--workers', '-j', metavar='N', type=int, default=4, help='number of data loading workers (default: 4)')
    parser.add_argument('--epochs', metavar='EPOCH', type=int, default=360, help='number of total epochs to run (default: 2)')
    parser.add_argument('--batch_size', '-b', metavar='BATCH_SIZE', type=int, default=512, help='mini-batch size (default: 256)')
    parser.add_argument('--print_freq', '-p', metavar='N', type=int, default=10, help='print frequence (default: 10)')
    parser.add_argument('--gpus', metavar='DEV_ID', default=None, help='Comma-separated list of GPU device IDs to be used (default is to use all available devices)')
    parser.add_argument('--cpu', action='store_true', default=False, help='Use CPU only.\n'
                        'Flag not set => uses GPUs according to the --gpus flag value.'
                        'Flag set => overrides the --gpus flag')
    parser.add_argument('--do_eval', action='store_true', help='evaluate model')
    parser.add_argument('--do_train', action='store_true', help='train model')
    parser.add_argument('--name', '-n', metavar='NAME', default=None, help='Experiment name')
    parser.add_argument('--out_dir', '-o', dest='output_dir', default='logs/resnet18', help='Path to dump logs and checkpoints')
    parser.add_argument('--dataset', dest='dataset', type=str, default='cifar10', help='dataset used to train (default: cifar10)')
    parser.add_argument('--deterministic', '--det', action='store_true', help='Ensure deterministic execution for re-producible results.')
    # --- Dataset sub-sampling; float_range() validates the 0..1 bounds ---
    parser.add_argument('--validation-split', '--valid-size', '--vs', dest='validation_split',
                        type=float_range(exc_max=True), default=0., help='Portion of training dataset to set aside for validation (default: 0.0)')
    parser.add_argument('--effective-train-size', '--etrs', type=float_range(exc_min=True), default=1.,
                        help='Portion of training dataset to be used in each epoch. '
                             'NOTE: If --validation-split is set, then the value of this argument is applied '
                             'AFTER the train-validation split according to that argument')
    parser.add_argument('--effective-valid-size', '--evs', type=float_range(exc_min=True), default=1.,
                        help='Portion of validation dataset to be used in each epoch. '
                             'NOTE: If --validation-split is set, then the value of this argument is applied '
                             'AFTER the train-validation split according to that argument')
    parser.add_argument('--effective-test-size', '--etes', type=float_range(exc_min=True), default=1.,
                        help='Portion of test dataset to be used in each epoch')
    parser.add_argument('--disable_tqdm', action='store_true', help='disable tqdm')
    # --- Block-convolution specific options ---
    parser.add_argument('--block_size', default=None, help='block size')
    parser.add_argument('--type', default=None, type=int, help='type of block size ( 0 or 1 )')
    parser.add_argument('--padding_mode', default=None, help='padding mode ("constant", "replicate", "reflect")')
    # --- Optimizer hyper-parameters ---
    optimizer_args = parser.add_argument_group('Optimizer arguments')
    optimizer_args.add_argument('--learning_rate', '--lr', metavar='LR', type=float, default=0.1, help='initial learning rate (default: 0.1)')
    optimizer_args.add_argument('--momentum', metavar='M', type=float, default=0.9, help='momentum (default: 0.9)')
    optimizer_args.add_argument('--weight_decay', '--wd', metavar='W', type=float, default=5e-4, help='weight decay (default: 1e-4)')
    optimizer_args.add_argument('--milestones', '--ms', default=None, help='Milestones for MultiStepLR')
    # --- Checkpoint resume ---
    load_checkpoint_group = parser.add_argument_group('Resuming arguments')
    load_checkpoint_group_exc = load_checkpoint_group.add_mutually_exclusive_group()
    load_checkpoint_group_exc.add_argument('--resume_from', dest='resumed_checkpoint_path', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint. Use to resume paused training session.')
    load_checkpoint_group.add_argument('--reset_optimizer', action='store_true', help='Flag to override optimizer if resumed from checkpoint. This will reset epochs count.')
    return parser
def float_range(min_val=0., max_val=1., exc_min=False, exc_max=False):
    """Build an argparse ``type=`` callable validating a float in a range.

    Args:
        min_val: lower bound of the accepted range.
        max_val: upper bound of the accepted range.
        exc_min: if True the lower bound is exclusive (strict ``>``).
        exc_max: if True the upper bound is exclusive (strict ``<``).

    Returns:
        A callable ``checker(val_str) -> float`` that raises ``ValueError``
        for out-of-range values (argparse reports this as a usage error).

    Raises:
        ValueError: immediately, if ``min_val >= max_val``.
    """
    # Validate the configuration up front, before handing out a checker.
    if min_val >= max_val:
        raise ValueError('min_val must be less than max_val')
    # Resolve the comparison operators once instead of on every parsed value.
    min_op, min_op_str = (operator.gt, '>') if exc_min else (operator.ge, '>=')
    max_op, max_op_str = (operator.lt, '<') if exc_max else (operator.le, '<=')
    def checker(val_str):
        val = float(val_str)
        if min_op(val, min_val) and max_op(val, max_val):
            return val
        raise ValueError('Value must be {} {} and {} {} (received {})'.format(min_op_str, min_val, max_op_str, max_val, val))
    return checker
| 77.428571 | 173 | 0.656827 | import argparse
import operator
def get_parser():
parser = argparse.ArgumentParser(description="Simple code for train and test on ImageNet and Cifar")
parser.add_argument('data', metavar='DIR', help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18', help='model architecture'+'(default: resnet18)')
parser.add_argument('--workers', '-j', metavar='N', type=int, default=4, help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', metavar='EPOCH', type=int, default=360, help='number of total epochs to run (default: 2)')
parser.add_argument('--batch_size', '-b', metavar='BATCH_SIZE', type=int, default=512, help='mini-batch size (default: 256)')
parser.add_argument('--print_freq', '-p', metavar='N', type=int, default=10, help='print frequence (default: 10)')
parser.add_argument('--gpus', metavar='DEV_ID', default=None, help='Comma-separated list of GPU device IDs to be used (default is to use all available devices)')
parser.add_argument('--cpu', action='store_true', default=False, help='Use CPU only.\n'
'Flag not set => uses GPUs according to the --gpus flag value.'
'Flag set => overrides the --gpus flag')
parser.add_argument('--do_eval', action='store_true', help='evaluate model')
parser.add_argument('--do_train', action='store_true', help='train model')
parser.add_argument('--name', '-n', metavar='NAME', default=None, help='Experiment name')
parser.add_argument('--out_dir', '-o', dest='output_dir', default='logs/resnet18', help='Path to dump logs and checkpoints')
parser.add_argument('--dataset', dest='dataset', type=str, default='cifar10', help='dataset used to train (default: cifar10)')
parser.add_argument('--deterministic', '--det', action='store_true', help='Ensure deterministic execution for re-producible results.')
parser.add_argument('--validation-split', '--valid-size', '--vs', dest='validation_split',
type=float_range(exc_max=True), default=0., help='Portion of training dataset to set aside for validation (default: 0.0)')
parser.add_argument('--effective-train-size', '--etrs', type=float_range(exc_min=True), default=1.,
help='Portion of training dataset to be used in each epoch. '
'NOTE: If --validation-split is set, then the value of this argument is applied '
'AFTER the train-validation split according to that argument')
parser.add_argument('--effective-valid-size', '--evs', type=float_range(exc_min=True), default=1.,
help='Portion of validation dataset to be used in each epoch. '
'NOTE: If --validation-split is set, then the value of this argument is applied '
'AFTER the train-validation split according to that argument')
parser.add_argument('--effective-test-size', '--etes', type=float_range(exc_min=True), default=1.,
help='Portion of test dataset to be used in each epoch')
parser.add_argument('--disable_tqdm', action='store_true', help='disable tqdm')
parser.add_argument('--block_size', default=None, help='block size')
parser.add_argument('--type', default=None, type=int, help='type of block size ( 0 or 1 )')
parser.add_argument('--padding_mode', default=None, help='padding mode ("constant", "replicate", "reflect")')
optimizer_args = parser.add_argument_group('Optimizer arguments')
optimizer_args.add_argument('--learning_rate', '--lr', metavar='LR', type=float, default=0.1, help='initial learning rate (default: 0.1)')
optimizer_args.add_argument('--momentum', metavar='M', type=float, default=0.9, help='momentum (default: 0.9)')
optimizer_args.add_argument('--weight_decay', '--wd', metavar='W', type=float, default=5e-4, help='weight decay (default: 1e-4)')
optimizer_args.add_argument('--milestones', '--ms', default=None, help='Milestones for MultiStepLR')
load_checkpoint_group = parser.add_argument_group('Resuming arguments')
load_checkpoint_group_exc = load_checkpoint_group.add_mutually_exclusive_group()
load_checkpoint_group_exc.add_argument('--resume_from', dest='resumed_checkpoint_path', default='', type=str, metavar='PATH',
help='path to latest checkpoint. Use to resume paused training session.')
load_checkpoint_group.add_argument('--reset_optimizer', action='store_true', help='Flag to override optimizer if resumed from checkpoint. This will reset epochs count.')
return parser
def float_range(min_val=0., max_val=1., exc_min=False, exc_max=False):
def checker(val_str):
val = float(val_str)
min_op, min_op_str = (operator.gt, '>') if exc_min else (operator.ge, '>=')
max_op, max_op_str = (operator.lt, '<') if exc_max else (operator.le, '<=')
if min_op(val, min_val) and max_op(val, max_val):
return val
else:
raise ValueError('Value must be {} {} and {} {} (received {})'.format(min_op_str, min_val, max_op_str, max_val, val))
if min_val >= max_val:
raise ValueError('min_val must be less than max_val')
return checker
| true | true |
1c3cdb04737489ab50f8af6ca2ec50a5537b7d7d | 10,309 | py | Python | chexnet_client.py | kholohan/chexnet | e8cb9bf2365326210d64b09ccfd503a858485941 | [
"MIT"
] | 16 | 2018-12-23T22:19:47.000Z | 2020-08-13T16:30:33.000Z | chexnet_client.py | kholohan/chexnet | e8cb9bf2365326210d64b09ccfd503a858485941 | [
"MIT"
] | 21 | 2018-10-18T16:29:49.000Z | 2021-06-16T12:15:58.000Z | chexnet_client.py | kholohan/chexnet | e8cb9bf2365326210d64b09ccfd503a858485941 | [
"MIT"
] | 12 | 2018-12-23T22:19:53.000Z | 2020-12-21T12:06:09.000Z | import cv2
import grpc
from configparser import ConfigParser
from confluent_kafka import Producer, Consumer, KafkaError, KafkaException
import generator
import io
import json
import keras.backend as K
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
from PIL import Image
import scipy.misc
from skimage.transform import resize
from io import StringIO
import sys
import tensorflow as tf
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import tensor_util
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
import threading
# TODO explore extending model definition in SavedModel
# to account for returning a Class Activation Map (CAM)
# for overlay onto xray image that has been uploaded
config_file = "./sample_config.ini"
cp = ConfigParser()
cp.read(config_file)
bootstrap_server = cp["KAFKA"].get("bootstrap_server")
bootstrap_port = cp["KAFKA"].get("bootstrap_port")
group_id = cp["KAFKA"].get("group_id")
inference_kafka_topic = cp["KAFKA"].get("inference_kafka_topic").split(',')
results_kafka_topic = cp["KAFKA"].get("results_kafka_topic")
offset = cp["KAFKA"].get("offset_reset")
class_names = cp["DEFAULT"].get("class_names").split(",")
def logger():
    """Return the module-wide ``chexnet_client`` logger.

    Records are emitted when poll()/flush() run on the Kafka Consumer/Producer
    this logger is attached to.

    Returns:
        logging.Logger: DEBUG-level logger with a single stream handler.
    """
    log = logging.getLogger('chexnet_client')
    log.setLevel(logging.DEBUG)
    # Guard against stacking a new StreamHandler (and thus duplicated
    # output lines) every time logger() is called.
    if not log.handlers:
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
        log.addHandler(handler)
    return log
logs = logger()
def kafka_consumer() -> Consumer:
    """Build a Kafka ``Consumer`` wired to the configured broker.

    Returns:
        Consumer: consumer using the module-level broker/group/offset settings.
    """
    consumer_conf = {
        'bootstrap.servers': bootstrap_server,
        'group.id': group_id,
        'auto.offset.reset': offset
    }
    return Consumer(consumer_conf, logger=logs)
def kafka_producer() -> Producer:
    """Build a Kafka ``Producer`` for publishing results to the broker.

    Returns:
        Producer: producer allowing messages up to 10 MB.
    """
    producer_conf = {
        'bootstrap.servers': bootstrap_server,
        'message.max.bytes': 10000000
    }
    return Producer(producer_conf, logger=logs)
def kafka_delivery_report(err, msg):
    """Per-message delivery callback for the producer.

    Triggered by poll() or flush(); logs success or failure of delivery.
    """
    if err is None:
        logs.info('Message delivered to {} [{}] at offset [{}]'.format(msg.topic(), msg.partition(), msg.offset()))
    else:
        logs.info('Message delivery failed! : {}'.format(err))
def do_inference(ts_server: str, ts_port: int, model_input):
    """Run one prediction against a TensorFlow Serving instance over gRPC.

    Arguments:
        ts_server {str} -- TensorFlow Serving host/IP
        ts_port {int} -- TensorFlow Serving port
        model_input -- input tensor; converted to a (1, 224, 224, 3) float proto

    Returns:
        tuple -- (prediction, class_weights, final_conv_layer) as numpy arrays,
        extracted from the named outputs of the 'predict' signature.
    """
    channel = grpc.insecure_channel(ts_server + ":" + str(ts_port))
    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'DenseNet121'
    request.model_spec.signature_name = 'predict'
    # NOTE(review): tf.contrib is TF 1.x only — this call assumes a 1.x runtime.
    request.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(model_input, dtype=types_pb2.DT_FLOAT, shape=[1, 224, 224, 3])
    )
    # Second positional argument is the gRPC call timeout in seconds.
    result_future = stub.Predict(request, 5.0)
    prediction = tensor_util.MakeNdarray(result_future.outputs['prediction'])
    class_weights = tensor_util.MakeNdarray(result_future.outputs['class_weights'])
    final_conv_layer = tensor_util.MakeNdarray(result_future.outputs['final_conv_layer'])
    logs.info("Successfully received response from TensorFlow Server!")
    return prediction, class_weights, final_conv_layer
def image_transform(msg_payload) -> Image:
    """Decode a Kafka image payload into model-ready and original arrays.

    Arguments:
        msg_payload -- Kafka message whose value() is raw image bytes.

    Returns:
        tuple -- (model_input, orig_image_array): the image rescaled to
        (1, 224, 224, 3) floats in [0, 1], and the untouched RGB array.
    """
    raw_bytes = bytearray(msg_payload.value())
    decoded = Image.open(io.BytesIO(raw_bytes))
    orig_image_array = np.asarray(decoded.convert("RGB"))
    scaled = orig_image_array / 255.
    model_input = resize(scaled, (1, 224, 224, 3))
    logs.info("topic : [%s] - offset : [%s] - image successfully transformed!", msg_payload.topic(), msg_payload.offset())
    return model_input, orig_image_array
def marshall_message(img_bytes, aurocs) -> str:
    """Serialize an annotated image plus prediction scores to a JSON string.

    (Fix: the return annotation previously claimed ``dict`` although the
    function has always returned the ``json.dumps`` string.)

    Arguments:
        img_bytes {bytes} -- PNG image bytes to embed in the message
        aurocs {list} -- prediction scores, one per pathology class

    Returns:
        str -- JSON document with 'image' and 'aurocs' keys.
    """
    # latin-1 maps every byte 0-255 to a single code point, so the image
    # bytes survive a lossless round-trip through JSON text.
    ser_message = {}
    ser_message['image'] = img_bytes.decode('latin-1')
    ser_message['aurocs'] = aurocs
    return json.dumps(ser_message)
def create_barchart(prediction_array):
    """Render per-pathology probabilities to ``barchart.png``.

    Arguments:
        prediction_array -- probabilities returned by the CheXNet model,
        one per entry in the module-level class_names list.
    """
    labels = class_names
    plt.barh(labels, prediction_array, align='center', alpha=0.5)
    plt.yticks(labels, class_names)
    plt.xlabel('Probability')
    plt.title("Probability of given pathology")
    plt.savefig("barchart.png")
def create_cams(feature_conv, weight_softmax, class_idx, orig_image_size):
    """Build class activation maps for the requested classes.

    Each CAM is min-max normalized to uint8 and upsampled to the original
    image size.

    Arguments:
        feature_conv -- conv feature maps with shape (bz, nc, h, w)
        weight_softmax -- per-class weights over the nc feature channels
        class_idx -- iterable of class indices to build CAMs for
        orig_image_size -- target (width, height) for cv2.resize

    Returns:
        list -- one uint8 CAM array per requested class index.
    """
    orig_size = orig_image_size
    bz, nc, h, w = feature_conv.shape
    # Flatten spatial dims once; reshape is non-mutating so this is safe.
    flat_features = feature_conv.reshape((nc, h * w))
    output_cam = []
    for idx in class_idx:
        cam = weight_softmax[idx].dot(flat_features).reshape(h, w)
        cam = cam - np.min(cam)
        cam_img = np.uint8(255 * (cam / np.max(cam)))
        output_cam.append(cv2.resize(cam_img, orig_size))
    return output_cam
def collect_image(topic: str, kafka_session: Consumer):
    """Consume images from Kafka, run inference, and publish annotated results.

    Loops forever: polls *topic*, transforms each image, calls TensorFlow
    Serving, overlays a class activation map (CAM), and publishes the
    JSON-marshalled result to the results topic.

    Arguments:
        topic {str} -- topic (or list of topics) to subscribe to
        kafka_session {Consumer} -- connected Kafka consumer

    Raises:
        KafkaException: on any consumer error other than partition EOF.
    """
    def print_assignment(consumer, partitions):
        print('Assignment:', partitions)
    kafka_session.subscribe(topic, on_assign=print_assignment)
    while True:
        msg = kafka_session.poll(timeout=1.0)
        if msg is None:
            # Fix: this log previously ran AFTER the None check, i.e. for
            # every successfully received message. It belongs here, when
            # poll() timed out with nothing to consume.
            logs.info("No messages available within topic : %s", topic)
            continue
        if msg.error():
            if msg.error().code() == KafkaError._PARTITION_EOF:
                # End of partition is informational, not fatal.
                logs.info('%% %s [%d] reached end of offset %d' %
                          (msg.topic(), msg.partition(), msg.offset()))
            else:
                logs.debug("Kafka Exception : %s", msg.error())
                raise KafkaException(msg.error())
        else:
            # Well formed message
            logs.info('%% %s [%d] at offset %d with key %s: ' %
                      (msg.topic(), msg.partition(), msg.offset(),
                       str(msg.key())))
            # Decode bytes -> (1, 224, 224, 3) model input + original RGB array.
            image_array, orig_image_array = image_transform(msg)
            # TODO(review): TF Serving endpoint is hard-coded; move to config.
            prediction, class_weights, final_conv_layer = do_inference(ts_server="172.23.0.9", ts_port=8500, model_input=image_array)
            # create CAM
            get_output = K.function([tf.convert_to_tensor(image_array)], [tf.convert_to_tensor(final_conv_layer), tf.convert_to_tensor(prediction)])
            [conv_outputs, predictions] = get_output([image_array[0]])
            conv_outputs = conv_outputs[0, :, :, :]
            # TODO: Receiving variable results across CAMs generated by this
            # method. Needs further investigation and comparison to original
            # CAM paper found here : http://cnnlocalization.csail.mit.edu/
            cam = np.zeros(dtype=np.float32, shape=(conv_outputs.shape[:2]))
            for i, w in enumerate(class_weights[0]):
                cam += w * conv_outputs[:, :, i]
            # Min-max normalize the CAM to [0, 1].
            cam = cam - np.min(cam)
            cam /= np.max(cam)
            #h,w = orig_image_array.shape[:2]
            cam = cv2.resize(cam, orig_image_array.shape[:2])
            # TODO : Investigate why the cv2.resize() function transposes
            # the height and width of the orig_image_array
            #cam = cv2.resize(cam, (orig_image_array.shape[:2][1], orig_image_array.shape[:2][0]), interpolation=cv2.INTER_CUBIC)
            cam = np.uint8(255 * cam)
            heatmap = cv2.applyColorMap(cam, cv2.COLORMAP_JET)
            #heatmap[np.where(cam < 0.2)] = 0
            # Blend heatmap onto the original image.
            img = heatmap * 0.3 + orig_image_array
            logs.info("Class Activation Map (CAM) Created!")
            # This is complete hackery and will need to be replaced
            # I don't know why a numpy array (see `img` array above)
            # would be 25MB when all constituent arrays are ~ 7MB total.
            # Let alone when saving an image to disk the image is only 1MB total.
            cv2.imwrite("inflight_img.png", img)
            new_img = Image.open("inflight_img.png", mode='r')
            img_bytes = io.BytesIO()
            new_img.save(img_bytes, format='PNG')
            img_bytes = img_bytes.getvalue()
            message = marshall_message(img_bytes, prediction.tolist())
            os.remove("inflight_img.png")
            p = kafka_producer()
            p.poll(0)
            p.produce(results_kafka_topic, value=message, callback=kafka_delivery_report)
            p.flush()
def main():
    """Entry point: consume images from Kafka and run inference on each."""
    # TODO: Restructure execution logic and break apart more
    # complex functions such as collect_image(), etc.
    # KISS and DRY should be applied...
    consumer = kafka_consumer()
    collect_image(inference_kafka_topic, consumer)
if __name__ == '__main__':
main() | 34.249169 | 148 | 0.647007 | import cv2
import grpc
from configparser import ConfigParser
from confluent_kafka import Producer, Consumer, KafkaError, KafkaException
import generator
import io
import json
import keras.backend as K
import logging
import matplotlib.pyplot as plt
import numpy as np
import os
from PIL import Image
import scipy.misc
from skimage.transform import resize
from io import StringIO
import sys
import tensorflow as tf
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import tensor_util
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
import threading
config_file = "./sample_config.ini"
cp = ConfigParser()
cp.read(config_file)
bootstrap_server = cp["KAFKA"].get("bootstrap_server")
bootstrap_port = cp["KAFKA"].get("bootstrap_port")
group_id = cp["KAFKA"].get("group_id")
inference_kafka_topic = cp["KAFKA"].get("inference_kafka_topic").split(',')
results_kafka_topic = cp["KAFKA"].get("results_kafka_topic")
offset = cp["KAFKA"].get("offset_reset")
class_names = cp["DEFAULT"].get("class_names").split(",")
def logger():
logger = logging.getLogger('chexnet_client')
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
return logger
logs = logger()
def kafka_consumer() -> Consumer:
c = Consumer({
'bootstrap.servers': bootstrap_server,
'group.id': group_id,
'auto.offset.reset': offset
}, logger=logs)
return c
def kafka_producer() -> Producer:
p = Producer({
'bootstrap.servers': bootstrap_server,
'message.max.bytes': 10000000
}, logger=logs)
return p
def kafka_delivery_report(err, msg):
if err is not None:
logs.info('Message delivery failed! : {}'.format(err))
else:
logs.info('Message delivered to {} [{}] at offset [{}]'.format(msg.topic(), msg.partition(), msg.offset()))
def do_inference(ts_server: str, ts_port: int, model_input):
channel = grpc.insecure_channel(ts_server + ":" + str(ts_port))
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
request = predict_pb2.PredictRequest()
request.model_spec.name = 'DenseNet121'
request.model_spec.signature_name = 'predict'
request.inputs['images'].CopyFrom(
tf.contrib.util.make_tensor_proto(model_input, dtype=types_pb2.DT_FLOAT, shape=[1, 224, 224, 3])
)
result_future = stub.Predict(request, 5.0)
prediction = tensor_util.MakeNdarray(result_future.outputs['prediction'])
class_weights = tensor_util.MakeNdarray(result_future.outputs['class_weights'])
final_conv_layer = tensor_util.MakeNdarray(result_future.outputs['final_conv_layer'])
logs.info("Successfully received response from TensorFlow Server!")
return prediction, class_weights, final_conv_layer
def image_transform(msg_payload) -> Image:
image_bytes = bytearray(msg_payload.value())
image = Image.open(io.BytesIO(image_bytes))
orig_image_array = np.asarray(image.convert("RGB"))
image_array = orig_image_array / 255.
image_array = resize(image_array, (1, 224, 224, 3))
logs.info("topic : [%s] - offset : [%s] - image successfully transformed!", msg_payload.topic(), msg_payload.offset())
return image_array, orig_image_array
def marshall_message(img_bytes, aurocs) -> dict:
ser_message = {}
img_bytes = img_bytes.decode('latin-1')
ser_message['image'] = img_bytes
ser_message['aurocs'] = aurocs
return json.dumps(ser_message)
def create_barchart(prediction_array):
y_pos = class_names
plt.barh(y_pos, prediction_array, align='center', alpha=0.5)
plt.yticks(y_pos, class_names)
plt.xlabel('Probability')
plt.title("Probability of given pathology")
plt.savefig("barchart.png")
def create_cams(feature_conv, weight_softmax, class_idx, orig_image_size):
orig_size = orig_image_size
bz, nc, h, w = feature_conv.shape
output_cam = []
for idx in class_idx:
cam = weight_softmax[idx].dot(feature_conv.reshape((nc, h*w)))
cam = cam.reshape(h, w)
cam = cam - np.min(cam)
cam_img = cam / np.max(cam)
cam_img = np.uint8(255 * cam_img)
output_cam.append(cv2.resize(cam_img, orig_size))
return output_cam
def collect_image(topic: str, kafka_session: Consumer):
def print_assignment(consumer, partitions):
print('Assignment:', partitions)
kafka_session.subscribe(topic, on_assign=print_assignment)
while True:
msg = kafka_session.poll(timeout=1.0)
if msg is None:
continue
logs.info("No messages available within topic : %s", topic)
if msg.error():
if msg.error().code() == KafkaError._PARTITION_EOF:
logs.info('%% %s [%d] reached end of offset %d' %
(msg.topic(), msg.partition(), msg.offset()))
else:
logs.debug("Kafka Exception : %s", msg.error())
raise KafkaException(msg.error())
else:
logs.info('%% %s [%d] at offset %d with key %s: ' %
(msg.topic(), msg.partition(), msg.offset(),
str(msg.key())))
image_array, orig_image_array = image_transform(msg)
prediction, class_weights, final_conv_layer = do_inference(ts_server="172.23.0.9", ts_port=8500, model_input=image_array)
get_output = K.function([tf.convert_to_tensor(image_array)], [tf.convert_to_tensor(final_conv_layer), tf.convert_to_tensor(prediction)])
[conv_outputs, predictions] = get_output([image_array[0]])
conv_outputs = conv_outputs[0, :, :, :]
cam = np.zeros(dtype=np.float32, shape=(conv_outputs.shape[:2]))
for i, w in enumerate(class_weights[0]):
cam += w * conv_outputs[:, :, i]
cam = cam - np.min(cam)
cam /= np.max(cam)
cam = cv2.resize(cam, orig_image_array.shape[:2])
cam = np.uint8(255 * cam)
heatmap = cv2.applyColorMap(cam, cv2.COLORMAP_JET)
img = heatmap * 0.3 + orig_image_array
logs.info("Class Activation Map (CAM) Created!")
# would be 25MB when all constituent arrays are ~ 7MB total.
# Let alone when saving an image to disk the image is only 1MB total.
cv2.imwrite("inflight_img.png", img)
new_img = Image.open("inflight_img.png", mode='r')
img_bytes = io.BytesIO()
new_img.save(img_bytes, format='PNG')
img_bytes = img_bytes.getvalue()
message = marshall_message(img_bytes, prediction.tolist())
os.remove("inflight_img.png")
p = kafka_producer()
p.poll(0)
p.produce(results_kafka_topic, value=message, callback=kafka_delivery_report)
p.flush()
def main():
# TODO: Restructure execution logic and break apart more
# complex functions such as collect_image(), etc.
# KISS and DRY should be applied...
kafka = kafka_consumer()
collect_image(inference_kafka_topic, kafka)
if __name__ == '__main__':
main() | true | true |
1c3cdbbac3409b1a7421bcfd1407cb3fb0ff29d4 | 1,603 | py | Python | developerweek2018/settings.py | ykifle/developerweek2018 | caa20e075f1deae800e85c399253271ab5397a48 | [
"BSD-3-Clause"
] | null | null | null | developerweek2018/settings.py | ykifle/developerweek2018 | caa20e075f1deae800e85c399253271ab5397a48 | [
"BSD-3-Clause"
] | null | null | null | developerweek2018/settings.py | ykifle/developerweek2018 | caa20e075f1deae800e85c399253271ab5397a48 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Application configuration."""
import os
class Config(object):
    """Base configuration shared by all environments."""
    SECRET_KEY = os.environ.get('DEVELOPERWEEK2018_SECRET', 'secret-key')  # TODO: Change me
    APP_DIR = os.path.abspath(os.path.dirname(__file__))  # This directory
    # Repository root (one level above the app package).
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
    BCRYPT_LOG_ROUNDS = 13
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.
    # Disable SQLAlchemy event system overhead.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    WEBPACK_MANIFEST_PATH = 'webpack/manifest.json'
class ProdConfig(Config):
    """Production configuration: PostgreSQL backend, debugging off."""
    ENV = 'prod'
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = 'postgresql://localhost:/developerweek2018'
    DEBUG_TB_ENABLED = False  # Disable Debug toolbar
class DevConfig(Config):
    """Development configuration: local SQLite file, debug toolbar on."""
    ENV = 'dev'
    DEBUG = True
    DB_NAME = 'dev.db'
    # Put the db file in project root
    DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
    SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH)
    # SQLALCHEMY_DATABASE_URI = 'postgresql://localhost:/developerweek2018'
    DEBUG_TB_ENABLED = True
    CACHE_TYPE = 'simple'  # Can be "memcached", "redis", etc.
class TestConfig(Config):
    """Test configuration: in-memory SQLite, CSRF off for form tests."""
    TESTING = True
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'sqlite://'
    BCRYPT_LOG_ROUNDS = 4  # For faster tests; needs at least 4 to avoid "ValueError: Invalid rounds"
    WTF_CSRF_ENABLED = False  # Allows form testing
| 31.431373 | 101 | 0.692452 |
import os
class Config(object):
SECRET_KEY = os.environ.get('DEVELOPERWEEK2018_SECRET', 'secret-key')
APP_DIR = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
BCRYPT_LOG_ROUNDS = 13
DEBUG_TB_ENABLED = False
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = 'simple'
SQLALCHEMY_TRACK_MODIFICATIONS = False
WEBPACK_MANIFEST_PATH = 'webpack/manifest.json'
class ProdConfig(Config):
ENV = 'prod'
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost:/developerweek2018'
DEBUG_TB_ENABLED = False
class DevConfig(Config):
ENV = 'dev'
DEBUG = True
DB_NAME = 'dev.db'
DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH)
DEBUG_TB_ENABLED = True
CACHE_TYPE = 'simple'
class TestConfig(Config):
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
BCRYPT_LOG_ROUNDS = 4
WTF_CSRF_ENABLED = False
| true | true |
1c3cdc11b9340d299af2c3a24c583c03bafdf0d7 | 607 | py | Python | factual/query/submit.py | gvelez17/factual-python-driver | 8271e852e0e8e5d3fa4020cbad0b8211127ccd39 | [
"Apache-2.0"
] | 1 | 2020-08-15T22:53:37.000Z | 2020-08-15T22:53:37.000Z | factual/query/submit.py | gvelez17/factual-python-driver | 8271e852e0e8e5d3fa4020cbad0b8211127ccd39 | [
"Apache-2.0"
] | null | null | null | factual/query/submit.py | gvelez17/factual-python-driver | 8271e852e0e8e5d3fa4020cbad0b8211127ccd39 | [
"Apache-2.0"
] | null | null | null | from write import Write
class Submit(Write):
    """Immutable builder for a Factual submit (write) request.

    Each builder method returns a new Submit with the merged parameters,
    leaving the original instance unchanged.
    """
    def __init__(self, api, table, factual_id, params={}):
        # NOTE(review): mutable default {} is shared across calls; harmless
        # here only because it is never mutated in place.
        Write.__init__(self, api, table, factual_id, params)
    def values(self, values):
        """Return a copy of this request with the submitted values set."""
        return self._copy({'values': values})
    def clear_blanks(self):
        # Bug fix: 'self' was missing from the signature, so any call to
        # submit.clear_blanks() raised TypeError before reaching the body.
        return self._copy({'clear_blanks': True})
    def _path(self):
        # Build 't/<table>[/<factual_id>]/submit'.
        path = 't/' + self.table
        if self.factual_id:
            path += '/' + self.factual_id
        path += '/submit'
        return path
    def _copy(self, params):
        # Produce a new Submit with *params* merged into the current ones.
        return Submit(self.api, self.table, self.factual_id, self.merge_params(params))
| 27.590909 | 87 | 0.609555 | from write import Write
class Submit(Write):
def __init__(self, api, table, factual_id, params={}):
Write.__init__(self, api, table, factual_id, params)
def values(self, values):
return self._copy({'values': values})
def clear_blanks():
return self._copy({'clear_blanks': True})
def _path(self):
path = 't/' + self.table
if self.factual_id:
path += '/' + self.factual_id
path += '/submit'
return path
def _copy(self, params):
return Submit(self.api, self.table, self.factual_id, self.merge_params(params))
| true | true |
1c3cdcf1b8fd9593a7b0e2b97e1dc29749e67ab4 | 2,276 | py | Python | angstrom/angstrom.py | sighphyre/angstrom | b4bdaf1f626bf5b8b4176345bb01d32e825c2a74 | [
"Apache-2.0"
] | null | null | null | angstrom/angstrom.py | sighphyre/angstrom | b4bdaf1f626bf5b8b4176345bb01d32e825c2a74 | [
"Apache-2.0"
] | null | null | null | angstrom/angstrom.py | sighphyre/angstrom | b4bdaf1f626bf5b8b4176345bb01d32e825c2a74 | [
"Apache-2.0"
] | null | null | null | import sqlite3
from os import path
def mobj(mapping, result_set):
    """Project rows of *result_set* into dicts shaped like *mapping*.

    For each row, every key of *mapping* is looked up via its value
    (a column name or index) in the row.

    Returns:
        list[dict]: one dict per row, keyed like *mapping*.
    """
    return [
        {field: row[column] for field, column in mapping.items()}
        for row in result_set
    ]
def base_connector(db_name):
    """Return a zero-argument factory opening *db_name* with Row access.

    The returned callable creates a fresh sqlite3 connection whose rows
    support name-based indexing (sqlite3.Row).
    """
    def connect():
        connection = sqlite3.connect(db_name)
        connection.row_factory = sqlite3.Row
        return connection
    return connect
def file_system_sql_loader(sql_path):
    """Return a loader reading SQL text from files under *sql_path*.

    The returned callable takes a filename and returns the file's contents.
    """
    def get_sql(filename):
        full_path = path.join(sql_path, filename)
        with open(full_path) as _file:
            return _file.read()
    return get_sql
class TransactionManager:
    """Context manager wrapping one connection's transaction lifecycle.

    Opens a connection on entry; commits on clean exit, rolls back when the
    body raised, and closes the connection either way.
    """
    def __init__(self, connector):
        self._connector = connector
        self._conn = None
    def __enter__(self):
        self._conn = self._connector()
        return self._conn
    def __exit__(self, type, value, tb):
        # A traceback means the with-body raised: roll back; otherwise commit.
        finish = self._conn.rollback if tb else self._conn.commit
        finish()
        self._conn.close()
class Db:
    """Thin database facade combining a connection factory and a SQL loader.

    *connector* produces connections (see base_connector); *loader* maps a
    name to SQL text (see file_system_sql_loader). Methods accept an
    optional open connection so they can participate in an outer
    transaction (see start_transaction).

    NOTE(review): when no connection is passed, these methods use
    ``with self._connect() as conn``. For sqlite3 connections that commits
    or rolls back but does NOT close the connection — confirm the connector's
    context-manager semantics, otherwise connections may leak.
    """
    def __init__(self, connector, loader):
        # Zero-argument callable returning a fresh connection.
        self._connect = connector
        # Callable mapping a statement name to its SQL text.
        self._get_sql = loader
    def start_transaction(self):
        """Return a TransactionManager for explicit multi-statement work."""
        return TransactionManager(self._connect)
    def execute_script(self, name):
        """Load and run a multi-statement SQL script by name."""
        with self._connect() as conn:
            sql = self._get_sql(name)
            conn.executescript(sql)
    def execute_many(self, name, parameter_list, conn=None):
        """Run the named statement once per parameter tuple.

        Uses *conn* when given (caller manages the transaction), otherwise
        opens a temporary connection.
        """
        if conn:
            self._execute_many(name, parameter_list, conn)
        else:
            with self._connect() as conn:
                self._execute_many(name, parameter_list, conn)
    def _execute_many(self, name, parameter_list, conn):
        # Shared implementation for both the managed and unmanaged paths.
        sql = self._get_sql(name)
        conn.executemany(sql, parameter_list)
    def execute_query(self, name, parameters=None, conn=None):
        """Run the named query and return all rows.

        Uses *conn* when given, otherwise opens a temporary connection.
        """
        if conn:
            return self._execute_query(name, conn, parameters)
        else:
            with self._connect() as conn:
                return self._execute_query(name, conn, parameters)
    def _execute_query(self, name, conn, parameters=None):
        # NOTE(review): falsy parameters (e.g. empty tuple) take the
        # unparameterized branch — presumably intended; verify callers.
        sql = self._get_sql(name)
        cursor = conn.cursor()
        if parameters:
            cursor.execute(sql, parameters)
        else:
            cursor.execute(sql)
        return cursor.fetchall()
| 27.095238 | 66 | 0.612039 | import sqlite3
from os import path
def mobj(mapping, result_set):
rows = []
for row in result_set:
new_row = mapping.copy()
for key, value in mapping.items():
new_row[key] = row[value]
rows.append(new_row)
return rows
def base_connector(db_name):
def connect():
conn = sqlite3.connect(db_name)
conn.row_factory = sqlite3.Row
return conn
return connect
def file_system_sql_loader(sql_path):
def get_sql(filename):
with open(path.join(sql_path, filename)) as _file:
return _file.read()
return get_sql
class TransactionManager:
def __init__(self, connector):
self._connector = connector
self._conn = None
def __enter__(self):
self._conn = self._connector()
return self._conn
def __exit__(self, type, value, tb):
if tb:
self._conn.rollback()
else:
self._conn.commit()
self._conn.close()
class Db:
def __init__(self, connector, loader):
self._connect = connector
self._get_sql = loader
def start_transaction(self):
return TransactionManager(self._connect)
def execute_script(self, name):
with self._connect() as conn:
sql = self._get_sql(name)
conn.executescript(sql)
def execute_many(self, name, parameter_list, conn=None):
if conn:
self._execute_many(name, parameter_list, conn)
else:
with self._connect() as conn:
self._execute_many(name, parameter_list, conn)
def _execute_many(self, name, parameter_list, conn):
sql = self._get_sql(name)
conn.executemany(sql, parameter_list)
def execute_query(self, name, parameters=None, conn=None):
if conn:
return self._execute_query(name, conn, parameters)
else:
with self._connect() as conn:
return self._execute_query(name, conn, parameters)
def _execute_query(self, name, conn, parameters=None):
sql = self._get_sql(name)
cursor = conn.cursor()
if parameters:
cursor.execute(sql, parameters)
else:
cursor.execute(sql)
return cursor.fetchall()
| true | true |
1c3cdd0b09bfdc9dbd58fdb3c73064dd0e22510a | 5,115 | py | Python | assignment1/training.py | WhiteHyun/MachineLearning | 4c766d0abc03a3823a71f36bbbe7ad90736a20f0 | [
"MIT"
] | 2 | 2021-04-18T06:25:16.000Z | 2021-04-28T15:10:17.000Z | assignment1/training.py | WhiteHyun/MachineLearning | 4c766d0abc03a3823a71f36bbbe7ad90736a20f0 | [
"MIT"
] | 1 | 2021-04-20T06:56:32.000Z | 2021-04-22T16:43:36.000Z | assignment1/training.py | WhiteHyun/MachineLearning | 4c766d0abc03a3823a71f36bbbe7ad90736a20f0 | [
"MIT"
] | null | null | null | import random
import math
random.seed(1)
def randn(size):
    """Return ``size + 1`` uniform samples in [0, 1).

    The extra slot holds the bias weight (its input node is implicitly 1,
    see ``weight_sum``).
    """
    count = size + 1
    return [random.random() for _ in range(count)]
class MultiLayerPerceptron:
    """A 2-layer (hidden + output) perceptron trained by stochastic
    gradient descent with sigmoid activations.

    The network is ``self.model``: a list of layers, each layer a list of
    node dicts holding 'weights' (last entry is the bias) and — after a
    forward/backward pass — 'output' and 'delta'.
    """

    def __init__(self, ni, nh, no, dataset, epochs=5000) -> None:
        """Initialize the perceptron network.

        ni / nh / no: number of input / hidden / output nodes.
        dataset: rows of ``[feature..., class_index]``.
        epochs: number of training passes over the dataset.
        """
        self.model = []
        hidden_layer = [
            {'weights': randn(ni)} for _ in range(nh)
        ]
        output_layer = [
            {'weights': randn(nh)}for _ in range(no)
        ]
        self.model.append(hidden_layer)
        self.model.append(output_layer)
        self.dataset = dataset
        self.epochs = epochs
    def weight_sum(self, weights, inputs):
        """Return one node's weighted input sum, bias included."""
        sum = weights[-1]  # bias first: its input node is implicitly 1
        for i in range(len(weights)-1):
            sum += weights[i]*inputs[i]
        return sum
    def activation_func(self, x):
        """Activation function (sigmoid)."""
        # return max(0, x)  # ReLU (alternative)
        return 1.0/(1.0+math.exp(-x))  # sigmoid
    def activation_func_grad(self, x):
        """Derivative of the activation, expressed via its output value x."""
        # return 1 if x > 0 else 0  # ReLU derivative (alternative)
        return x*(1.0-x)  # sigmoid: s'(z) = s(z) * (1 - s(z))
    def feed_foward(self, data):
        """Forward pass: propagate `data` through each layer and return the
        output layer's activations.

        (NOTE: name is a typo of 'feed_forward'; kept for compatibility.)
        """
        inputs = data
        # pass through each layer in order
        for layer in self.model:
            outputs = []
            # weighted sum + activation for every node in this layer
            for node in layer:
                zsum_or_osum = self.weight_sum(node['weights'], inputs)
                # cache the activation on the node: it feeds the next layer
                # and is reused by backward()/update()
                node['output'] = self.activation_func(zsum_or_osum)
                outputs.append(node['output'])
            inputs = outputs
        return inputs
    def backward(self, label):
        """Backpropagation: compute each node's 'delta' for the one-hot
        `label` (stochastic gradient descent: one sample at a time)."""
        # walk the layers output -> input
        for i in reversed(range(len(self.model))):
            layer = self.model[i]
            errors = []  # per-node error terms for this layer
            if i == len(self.model)-1:  # output layer: target - output
                for j in range(len(layer)):
                    node = layer[j]
                    errors.append(label[j] - node['output'])
            else:
                for j in range(len(layer)):
                    error = 0.0
                    for node in self.model[i+1]:  # fold back next layer's deltas
                        error += (node['weights'][j]*node['delta'])
                    errors.append(error)
            for j in range(len(layer)):
                node = layer[j]
                node['delta'] = errors[j] * \
                    self.activation_func_grad(node['output'])
    def update(self, train_set, lr):
        """Apply one SGD weight update using the deltas from backward()."""
        for i in range(len(self.model)):
            # layer 0 sees the raw features (label stripped); deeper layers
            # see the previous layer's cached outputs
            inputs = train_set[:-1] if i == 0 else [node['output']
                                                   for node in self.model[i-1]]
            for node in self.model[i]:
                for j in range(len(inputs)):
                    node['weights'][j] += lr * node['delta'] * \
                        inputs[j]  # delta scaled by the node's input value
                node['weights'][-1] += lr * \
                    node['delta']  # the bias input is always 1
    def train(self, lr=0.5, verbose=False):
        """Train on ``self.dataset``.

        Parameters
        ----------
        lr : float
            Learning rate.
        verbose : bool
            Print the epoch number and mean error every 100 epochs.
        """
        for epoch in range(self.epochs):
            error = 0.0
            for train_set in self.dataset:
                # forward pass
                outputs = self.feed_foward(train_set)
                # build the one-hot target from the sample's class index
                label = [0 for _ in range(
                    len(set([row[-1] for row in self.dataset])))]
                label[train_set[-1]] = 1
                # accumulate squared error for reporting only
                error += sum((label[i]-outputs[i]) **
                             2 for i in range(len(label)))
                # backward pass
                self.backward(label)
                # weight update
                self.update(train_set, lr)
            if verbose and epoch % 100 == 0:
                print(f"epoch: {epoch}, error: {error/len(self.dataset):.3f}")
if __name__ == "__main__":
    # Toy 2D binary-classification dataset: each row is [x, y, class_index].
    dataset = [[3.5064385449265267, 2.34547092892632525, 0],
               [4.384621956392097, 3.4530853889904205, 0],
               [4.841442919897487, 4.02507852317520154, 0],
               [3.5985868973088437, 4.1621314217538705, 0],
               [2.887219775424049, 3.31523082529190005, 0],
               [9.79822645535526, 1.1052409596099566, 1],
               [7.8261241795117422, 0.6711054766067182, 1],
               [2.5026163932400305, 5.800780055043912, 1],
               [5.032436157202415, 8.650625621472184, 1],
               [4.095084253434162, 7.69104329159447, 1]]
    # Network sizing from the data: inputs = feature count, outputs = classes.
    len_input_nodes = len(dataset[0])-1
    len_hidden_nodes = 2
    len_output_nodes = len(set(map(lambda x: x[-1], dataset)))
    # Number of training epochs is read interactively from the user.
    epochs = int(input("epochs: "))
    network = MultiLayerPerceptron(
        len_input_nodes, len_hidden_nodes, len_output_nodes, dataset, epochs)
    network.train(verbose=True)
| 32.788462 | 85 | 0.494819 | import random
import math
random.seed(1)
def randn(size):
return [random.random() for _ in range(size+1)]
class MultiLayerPerceptron:
def __init__(self, ni, nh, no, dataset, epochs=5000) -> None:
self.model = []
hidden_layer = [
{'weights': randn(ni)} for _ in range(nh)
]
output_layer = [
{'weights': randn(nh)}for _ in range(no)
]
self.model.append(hidden_layer)
self.model.append(output_layer)
self.dataset = dataset
self.epochs = epochs
def weight_sum(self, weights, inputs):
sum = weights[-1]
for i in range(len(weights)-1):
sum += weights[i]*inputs[i]
return sum
def activation_func(self, x):
return 1.0/(1.0+math.exp(-x))
def activation_func_grad(self, x):
return x*(1.0-x)
def feed_foward(self, data):
inputs = data
for layer in self.model:
outputs = []
for node in layer:
zsum_or_osum = self.weight_sum(node['weights'], inputs)
node['output'] = self.activation_func(zsum_or_osum)
outputs.append(node['output'])
inputs = outputs
return inputs
def backward(self, label):
for i in reversed(range(len(self.model))):
layer = self.model[i]
errors = []
if i == len(self.model)-1:
for j in range(len(layer)):
node = layer[j]
errors.append(label[j] - node['output'])
else:
for j in range(len(layer)):
error = 0.0
for node in self.model[i+1]:
error += (node['weights'][j]*node['delta'])
errors.append(error)
for j in range(len(layer)):
node = layer[j]
node['delta'] = errors[j] * \
self.activation_func_grad(node['output'])
def update(self, train_set, lr):
for i in range(len(self.model)):
inputs = train_set[:-1] if i == 0 else [node['output']
for node in self.model[i-1]]
for node in self.model[i]:
for j in range(len(inputs)):
node['weights'][j] += lr * node['delta'] * \
inputs[j]
node['weights'][-1] += lr * \
node['delta']
def train(self, lr=0.5, verbose=False):
for epoch in range(self.epochs):
error = 0.0
for train_set in self.dataset:
outputs = self.feed_foward(train_set)
label = [0 for _ in range(
len(set([row[-1] for row in self.dataset])))]
label[train_set[-1]] = 1
error += sum((label[i]-outputs[i]) **
2 for i in range(len(label)))
self.backward(label)
self.update(train_set, lr)
if verbose and epoch % 100 == 0:
print(f"epoch: {epoch}, error: {error/len(self.dataset):.3f}")
if __name__ == "__main__":
dataset = [[3.5064385449265267, 2.34547092892632525, 0],
[4.384621956392097, 3.4530853889904205, 0],
[4.841442919897487, 4.02507852317520154, 0],
[3.5985868973088437, 4.1621314217538705, 0],
[2.887219775424049, 3.31523082529190005, 0],
[9.79822645535526, 1.1052409596099566, 1],
[7.8261241795117422, 0.6711054766067182, 1],
[2.5026163932400305, 5.800780055043912, 1],
[5.032436157202415, 8.650625621472184, 1],
[4.095084253434162, 7.69104329159447, 1]]
len_input_nodes = len(dataset[0])-1
len_hidden_nodes = 2
len_output_nodes = len(set(map(lambda x: x[-1], dataset)))
epochs = int(input("epochs: "))
network = MultiLayerPerceptron(
len_input_nodes, len_hidden_nodes, len_output_nodes, dataset, epochs)
network.train(verbose=True)
| true | true |
1c3cdd1a654bc85ef1a1f83e26db1a588b93120f | 6,116 | py | Python | mmwave/data/logger.py | vilari-mickopf/mmwave-gesture-recognition | a93f404c49c3797e441d456830e06f540abc4032 | [
"MIT"
] | 16 | 2021-02-23T02:28:47.000Z | 2022-03-28T02:49:28.000Z | mmwave/data/logger.py | f12markovic/mmwave-gesture-recognition | a93f404c49c3797e441d456830e06f540abc4032 | [
"MIT"
] | 7 | 2021-09-13T09:38:41.000Z | 2022-03-04T07:29:06.000Z | mmwave/data/logger.py | f12markovic/mmwave-gesture-recognition | a93f404c49c3797e441d456830e06f540abc4032 | [
"MIT"
] | 3 | 2021-06-13T20:27:21.000Z | 2021-11-06T06:00:05.000Z | #! /usr/bin/env python
import os
import time
import pickle
import pandas as pd
from tqdm import tqdm
from mmwave.data.formats import GESTURE
from mmwave.utils.utility_functions import print
import colorama
from colorama import Fore
colorama.init(autoreset=True)
class Logger:
    """Records mmWave frames into per-gesture CSV sample files and loads
    them back as training data.

    Sample files live in each gesture's directory as ``sample_<n>.csv``;
    a recording is finalized once no frame with detected objects arrives
    for half a second.
    """

    def __init__(self, gesture=None):
        # True while a sample recording is in progress
        self.logging = False
        self.gesture = gesture
        self.log_file = ''
        # time of the last frame that contained detected objects
        self.detected_time = 0
        # buffered CSV rows for objectless frames; flushed only if another
        # frame with objects follows
        self.empty_frames = ''
        self.frame_num = 0

    def __set_file(self):
        """Point self.log_file at the next free sample_<n>.csv path."""
        last_sample = self.get_last_sample(self.gesture)
        if last_sample is None:
            # No samples yet: start at sample_1.csv in the gesture's own
            # directory.  (Fix: the previous code joined `last_sample`,
            # which is None here, so os.path.join raised TypeError.)
            gesture = self.gesture
            if isinstance(gesture, str):
                gesture = GESTURE[gesture.upper()]
            self.log_file = os.path.join(gesture.get_dir(), 'sample_1.csv')
            return
        save_dir = os.path.dirname(last_sample)
        last_sample_name = os.path.splitext(last_sample)[0]
        num = int(os.path.basename(last_sample_name).split('_')[1]) + 1
        self.log_file = os.path.join(save_dir, 'sample_' + str(num) + '.csv')
        print(f'Sample number: {num}')

    def set_gesture(self, gesture):
        """Select the gesture whose directory receives new samples."""
        self.gesture = gesture

    def log(self, frame):
        """Append one parsed frame to the current sample file.

        Returns True when the sample is considered finished (no frame with
        objects for more than 0.5 s), False otherwise.
        """
        if not self.logging:
            self.__set_file()
            self.logging = True
            self.detected_time = time.perf_counter()
            print('Saving...')

        if (frame is not None and
                frame.get('tlvs') is not None and
                frame['tlvs'].get(1) is not None):
            # Frame carries detected objects (TLV type 1): write them out.
            self.detected_time = time.perf_counter()
            with open(self.log_file, 'a') as f:
                if self.frame_num == 0:
                    f.write('frame,x,y,range_idx,peak_value,doppler_idx,xyz_q_format\n')

                for obj in frame['tlvs'][1]['values']['objs']:
                    f.write(self.empty_frames)
                    f.write(str(self.frame_num) + ',')
                    f.write(str(obj['x_coord']) + ',')
                    f.write(str(obj['y_coord']) + ',')
                    f.write(str(obj['range_idx']) + ',')
                    f.write(str(obj['peak_value']) + ',')
                    f.write(str(obj['doppler_idx']) + ',')
                    f.write(str(frame['tlvs'][1]['values']['descriptor']['xyz_q_format']) + '\n')
                    self.empty_frames = ''
            self.frame_num += 1

        elif self.frame_num != 0:
            # Objectless frame inside a recording: buffer a placeholder row.
            self.empty_frames = (self.empty_frames +
                                 str(self.frame_num) + ',')
            self.empty_frames = (self.empty_frames +
                                 'None, None, None, None, None\n')
            self.frame_num += 1

        if time.perf_counter() - self.detected_time > .5:
            # Half a second without detections ends the sample.
            if os.path.isfile(self.log_file):
                print('Sample saved.\n')
            else:
                print('Nothing to save.\n')

            self.empty_frames = ''
            self.logging = False
            self.frame_num = 0
            return True
        return False

    @staticmethod
    def get_last_sample(gesture):
        """Return the path of the highest-numbered sample for `gesture`,
        or None when its directory holds no files."""
        if isinstance(gesture, str):
            gesture = GESTURE[gesture.upper()]
        save_dir = gesture.get_dir()
        if os.listdir(save_dir) == []:
            return

        nums = []
        for f in os.listdir(save_dir):
            num = os.path.splitext(f)[0].split('_')[1]
            nums.append(int(num))
        last_sample = 'sample_' + str(max(nums)) + '.csv'
        return os.path.join(save_dir, last_sample)

    def discard_last_sample(self):
        """Delete the most recently recorded sample of the current gesture."""
        last_sample = self.get_last_sample(self.gesture)
        if last_sample is None:
            print('No files.')
            return
        os.remove(last_sample)
        print('File deleted.')

    @staticmethod
    def get_data(gesture):
        """Yield one sample per CSV file of `gesture`.

        A sample is a list of frames; each frame a list of objects; each
        object five features normalized by 65535 (uint16 maximum).
        Placeholder rows ('None') become all-zero objects.
        """
        if isinstance(gesture, str):
            gesture = GESTURE[gesture.upper()]
        save_dir = gesture.get_dir()

        for f in tqdm(os.listdir(save_dir), desc='Files', leave=False):
            df = pd.read_csv(os.path.join(save_dir, f))
            num_of_frames = df.iloc[-1]['frame'] + 1
            sample = [[] for _ in range(num_of_frames)]
            for _, row in df.iterrows():
                if row['x'] == 'None':
                    obj = 5*[0.]
                else:
                    obj = [
                        float(row['x'])/65535.,
                        float(row['y'])/65535.,
                        float(row['range_idx'])/65535.,
                        float(row['peak_value'])/65535.,
                        float(row['doppler_idx'])/65535.
                    ]
                sample[row['frame']].append(obj)
            yield sample

    @staticmethod
    def get_stats(X, y):
        """Print and return (max_num_of_frames, max_num_of_objs, num_of_classes)."""
        num_of_classes = len(set(y))
        print(f'Number of classes: {num_of_classes}')

        sample_with_max_num_of_frames = max(X, key=lambda sample: len(sample))
        max_num_of_frames = len(sample_with_max_num_of_frames)
        print(f'Maximum number of frames: {max_num_of_frames}')

        # Fix: scan every frame of every sample.  The previous version
        # took a lexicographic max over per-sample frame-length lists,
        # which could pick a sample not containing the largest frame.
        max_num_of_objs = max(
            len(frame) for sample in X for frame in sample
        )
        print(f'Maximum num of objects: {max_num_of_objs}')

        return max_num_of_frames, max_num_of_objs, num_of_classes

    @staticmethod
    def get_all_data(refresh_data=False):
        """Return (X, y) over all gestures, using an on-disk pickle cache.

        With refresh_data=True the cache is rebuilt from the CSV files.
        """
        X_file = os.path.join(os.path.dirname(__file__), '.X_data')
        y_file = os.path.join(os.path.dirname(__file__), '.y_data')

        if refresh_data:
            X = []
            y = []
            for gesture in tqdm(GESTURE, desc='Gestures'):
                for sample in Logger.get_data(gesture):
                    X.append(sample)
                    y.append(gesture.value)
            # `with` closes the cache files even if pickling fails
            # (previously the handles from open() were never closed).
            with open(X_file, 'wb') as f:
                pickle.dump(X, f)
            with open(y_file, 'wb') as f:
                pickle.dump(y, f)
        else:
            print('Loading cached data...', end='')
            with open(X_file, 'rb') as f:
                X = pickle.load(f)
            with open(y_file, 'rb') as f:
                y = pickle.load(f)
            print(f'{Fore.GREEN}Done.')

        return X, y
| 33.604396 | 97 | 0.535154 |
import os
import time
import pickle
import pandas as pd
from tqdm import tqdm
from mmwave.data.formats import GESTURE
from mmwave.utils.utility_functions import print
import colorama
from colorama import Fore
colorama.init(autoreset=True)
class Logger:
def __init__(self, gesture=None):
self.logging = False
self.gesture = gesture
self.log_file = ''
self.detected_time = 0
self.empty_frames = ''
self.frame_num = 0
def __set_file(self):
last_sample = self.get_last_sample(self.gesture)
if last_sample is None:
self.log_file = os.path.join(last_sample, 'sample_1.csv')
return
save_dir = os.path.dirname(last_sample)
last_sample_name = os.path.splitext(last_sample)[0]
num = int(os.path.basename(last_sample_name).split('_')[1]) + 1
self.log_file = os.path.join(save_dir, 'sample_' + str(num) + '.csv')
print(f'Sample number: {num}')
def set_gesture(self, gesture):
self.gesture = gesture
def log(self, frame):
if not self.logging:
self.__set_file()
self.logging = True
self.detected_time = time.perf_counter()
print('Saving...')
if (frame is not None and
frame.get('tlvs') is not None and
frame['tlvs'].get(1) is not None):
self.detected_time = time.perf_counter()
with open(self.log_file, 'a') as f:
if self.frame_num == 0:
f.write('frame,x,y,range_idx,peak_value,doppler_idx,xyz_q_format\n')
for obj in frame['tlvs'][1]['values']['objs']:
f.write(self.empty_frames)
f.write(str(self.frame_num) + ',')
f.write(str(obj['x_coord']) + ',')
f.write(str(obj['y_coord']) + ',')
f.write(str(obj['range_idx']) + ',')
f.write(str(obj['peak_value']) + ',')
f.write(str(obj['doppler_idx']) + ',')
f.write(str(frame['tlvs'][1]['values']['descriptor']['xyz_q_format']) + '\n')
self.empty_frames = ''
self.frame_num += 1
elif self.frame_num != 0:
self.empty_frames = (self.empty_frames +
str(self.frame_num) + ',')
self.empty_frames = (self.empty_frames +
'None, None, None, None, None\n')
self.frame_num += 1
if time.perf_counter() - self.detected_time > .5:
if os.path.isfile(self.log_file):
print('Sample saved.\n')
else:
print('Nothing to save.\n')
self.empty_frames = ''
self.logging = False
self.frame_num = 0
return True
return False
@staticmethod
def get_last_sample(gesture):
if isinstance(gesture, str):
gesture = GESTURE[gesture.upper()]
save_dir = gesture.get_dir()
if os.listdir(save_dir) == []:
return
nums = []
for f in os.listdir(save_dir):
num = os.path.splitext(f)[0].split('_')[1]
nums.append(int(num))
last_sample = 'sample_' + str(max(nums)) + '.csv'
return os.path.join(save_dir, last_sample)
def discard_last_sample(self):
last_sample = self.get_last_sample(self.gesture)
if last_sample is None:
print('No files.')
return
os.remove(last_sample)
print('File deleted.')
@staticmethod
def get_data(gesture):
if isinstance(gesture, str):
gesture = GESTURE[gesture.upper()]
save_dir = gesture.get_dir()
for f in tqdm(os.listdir(save_dir), desc='Files', leave=False):
df = pd.read_csv(os.path.join(save_dir, f))
num_of_frames = df.iloc[-1]['frame'] + 1
sample = [[] for _ in range(num_of_frames)]
for _, row in df.iterrows():
if row['x'] == 'None':
obj = 5*[0.]
else:
obj = [
float(row['x'])/65535.,
float(row['y'])/65535.,
float(row['range_idx'])/65535.,
float(row['peak_value'])/65535.,
float(row['doppler_idx'])/65535.
]
sample[row['frame']].append(obj)
yield sample
@staticmethod
def get_stats(X, y):
num_of_classes = len(set(y))
print(f'Number of classes: {num_of_classes}')
sample_with_max_num_of_frames = max(X, key=lambda sample: len(sample))
max_num_of_frames = len(sample_with_max_num_of_frames)
print(f'Maximum number of frames: {max_num_of_frames}')
sample_with_max_num_of_objs = max(
X, key=lambda sample: [len(frame) for frame in sample]
)
frame_with_max_num_of_objs = max(
sample_with_max_num_of_objs, key=lambda obj: len(obj)
)
max_num_of_objs = len(frame_with_max_num_of_objs)
print(f'Maximum num of objects: {max_num_of_objs}')
return max_num_of_frames, max_num_of_objs, num_of_classes
@staticmethod
def get_all_data(refresh_data=False):
X_file = os.path.join(os.path.dirname(__file__), '.X_data')
y_file = os.path.join(os.path.dirname(__file__), '.y_data')
if refresh_data:
X = []
y = []
for gesture in tqdm(GESTURE, desc='Gestures'):
for sample in Logger.get_data(gesture):
X.append(sample)
y.append(gesture.value)
pickle.dump(X, open(X_file, 'wb'))
pickle.dump(y, open(y_file, 'wb'))
else:
print('Loading cached data...', end='')
X = pickle.load(open(X_file, 'rb'))
y = pickle.load(open(y_file, 'rb'))
print(f'{Fore.GREEN}Done.')
return X, y
| true | true |
1c3cdda3bb81cfe733d36d36a57fcc3d679c170a | 2,449 | py | Python | examples/Carl_Leake_Dissertation/Chapter_3/Example_3_2_spectral_method.py | leakec/tfc | f814be4643270498a68bb0859720191ff7216012 | [
"MIT"
] | 15 | 2021-01-04T16:30:59.000Z | 2022-03-26T22:12:45.000Z | examples/Carl_Leake_Dissertation/Chapter_3/Example_3_2_spectral_method.py | leakec/tfc | f814be4643270498a68bb0859720191ff7216012 | [
"MIT"
] | 3 | 2021-12-10T23:17:56.000Z | 2022-03-12T18:39:18.000Z | examples/Carl_Leake_Dissertation/Chapter_3/Example_3_2_spectral_method.py | leakec/tfc | f814be4643270498a68bb0859720191ff7216012 | [
"MIT"
] | 2 | 2021-04-27T10:34:20.000Z | 2022-02-25T13:02:49.000Z | # Import python packages
from tqdm import tqdm
import numpy as onp
import jax.numpy as np
from jax import jacfwd
from matplotlib import cm
# Import TFC classes
from tfc import mtfc
from tfc.utils import egrad
from tfc.utils.Latex import table
# Constants and switches:
nVec = [5,10,15,20,25,30]  # points per axis for each test case
mVec = [5,10,15,20,25]  # basis-function order for each test case
x0 = np.array([0.,0.])  # lower corner of the unit-square domain
xf = np.array([1.,1.])  # upper corner of the unit-square domain
testErr = onp.zeros((len(nVec),len(mVec)))  # max abs error per (n, m) pair

# Real analytical solution (used only to measure the error);
# its Laplacian is exp(-x) * (x - 2 + y^3 + 6y), the forcing term below.
real = lambda x,y: np.exp(-x)*(x+y**3)

# Solve the problem for the various n and m values
for j,n in enumerate(tqdm(nVec)):
    for k,m in enumerate(mVec):
        # Create the TFC class:
        N = [n,]*2
        nC = [-1,]*2
        tfc = mtfc(N,nC,m,dim=2,basis='CP',x0=x0,xf=xf)
        x = tfc.x

        # More basis functions than collocation points: mark invalid, skip.
        if tfc.basisClass.numBasisFunc > n**2:
            testErr[j,k] = np.nan
            continue

        # Indices of the points on the four edges of the domain
        x0ind = np.where(x[0]==0.)[0]
        xfind = np.where(x[0]==1.)[0]
        y0ind = np.where(x[1]==0.)[0]
        yfind = np.where(x[1]==1.)[0]

        # Get the basis functions
        H = tfc.H

        # Spectral solution form: linear in the coefficients xi
        u = lambda xi,*x: np.dot(H(*x),xi)

        # PDE residual: laplacian(u) minus the forcing term
        laplace = lambda xi,*x: egrad(egrad(u,1),1)(xi,*x)+egrad(egrad(u,2),2)(xi,*x)
        L = lambda xi,*x: laplace(xi,*x)-np.exp(-x[0])*(x[0]-2.+x[1]**3+6.*x[1])

        # Assemble the linear system: residual rows (Jacobian of L in xi)
        # stacked with the four Dirichlet boundary-condition rows, whose
        # right-hand sides are `real` evaluated on the edges.
        zXi = np.zeros((tfc.basisClass.numBasisFunc))
        A = np.vstack([jacfwd(L,0)(zXi,*x),
                       H(x[0][x0ind],x[1][x0ind]),
                       H(x[0][xfind],x[1][xfind]),
                       H(x[0][y0ind],x[1][y0ind]),
                       H(x[0][yfind],x[1][yfind])])
        B = np.hstack([-L(zXi,*x),
                       x[1][x0ind]**3,
                       (1.+x[1][xfind]**3)*np.exp(-1.),
                       x[0][y0ind]*np.exp(-x[0][y0ind]),
                       (x[0][yfind]+1.)*np.exp(-x[0][yfind])])

        # Solve for the coefficients via the Moore-Penrose pseudo-inverse
        xi = np.dot(np.linalg.pinv(A),B)

        # Evaluate the max absolute error on an n-by-n uniform grid
        dark = np.meshgrid(np.linspace(x0[0],xf[0],n),np.linspace(x0[1],xf[1],n))
        x = (dark[0].flatten(),dark[1].flatten())
        ur = real(*x)
        ue = u(xi,*x)
        err = ur-ue
        testErr[j,k] = np.max(np.abs(err))

# Print results as a table
tab = table.SimpleTable(testErr)
print(tab)

# `with` guarantees the file is closed even if the write fails
# (previously open/write/close left the handle open on error).
with open("SpectralData.txt","w") as f:
    f.write(tab)
| 28.476744 | 85 | 0.520212 |
from tqdm import tqdm
import numpy as onp
import jax.numpy as np
from jax import jacfwd
from matplotlib import cm
from tfc import mtfc
from tfc.utils import egrad
from tfc.utils.Latex import table
nVec = [5,10,15,20,25,30]
mVec = [5,10,15,20,25]
x0 = np.array([0.,0.])
xf = np.array([1.,1.])
testErr = onp.zeros((len(nVec),len(mVec)))
real = lambda x,y: np.exp(-x)*(x+y**3)
for j,n in enumerate(tqdm(nVec)):
for k,m in enumerate(mVec):
N = [n,]*2
nC = [-1,]*2
tfc = mtfc(N,nC,m,dim=2,basis='CP',x0=x0,xf=xf)
x = tfc.x
if tfc.basisClass.numBasisFunc > n**2:
testErr[j,k] = np.nan
continue
x0ind = np.where(x[0]==0.)[0]
xfind = np.where(x[0]==1.)[0]
y0ind = np.where(x[1]==0.)[0]
yfind = np.where(x[1]==1.)[0]
H = tfc.H
u = lambda xi,*x: np.dot(H(*x),xi)
laplace = lambda xi,*x: egrad(egrad(u,1),1)(xi,*x)+egrad(egrad(u,2),2)(xi,*x)
L = lambda xi,*x: laplace(xi,*x)-np.exp(-x[0])*(x[0]-2.+x[1]**3+6.*x[1])
zXi = np.zeros((tfc.basisClass.numBasisFunc))
A = np.vstack([jacfwd(L,0)(zXi,*x),
H(x[0][x0ind],x[1][x0ind]),
H(x[0][xfind],x[1][xfind]),
H(x[0][y0ind],x[1][y0ind]),
H(x[0][yfind],x[1][yfind])])
B = np.hstack([-L(zXi,*x),
x[1][x0ind]**3,
(1.+x[1][xfind]**3)*np.exp(-1.),
x[0][y0ind]*np.exp(-x[0][y0ind]),
(x[0][yfind]+1.)*np.exp(-x[0][yfind])])
xi = np.dot(np.linalg.pinv(A),B)
dark = np.meshgrid(np.linspace(x0[0],xf[0],n),np.linspace(x0[1],xf[1],n))
x = (dark[0].flatten(),dark[1].flatten())
ur = real(*x)
ue = u(xi,*x)
err = ur-ue
testErr[j,k] = np.max(np.abs(err))
tab = table.SimpleTable(testErr)
print(tab)
f = open("SpectralData.txt","w")
f.write(tab)
f.close()
| true | true |
1c3cddac2563e87b8cdc0c21a1fec79a0cd7b5f9 | 2,066 | py | Python | homeassistant/components/shiftr/__init__.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 23 | 2017-11-15T21:03:53.000Z | 2021-03-29T21:33:48.000Z | homeassistant/components/shiftr/__init__.py | domwillcode/home-assistant | f170c80bea70c939c098b5c88320a1c789858958 | [
"Apache-2.0"
] | 47 | 2020-07-23T07:13:11.000Z | 2022-03-31T06:01:46.000Z | homeassistant/components/shiftr/__init__.py | klauern/home-assistant-core | c18ba6aec0627e6afb6442c678edb5ff2bb17db6 | [
"Apache-2.0"
] | 10 | 2018-01-01T00:12:51.000Z | 2021-12-21T23:08:05.000Z | """Support for Shiftr.io."""
import logging
import paho.mqtt.client as mqtt
import voluptuous as vol
from homeassistant.const import (
CONF_PASSWORD,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
EVENT_STATE_CHANGED,
)
from homeassistant.helpers import state as state_helper
import homeassistant.helpers.config_validation as cv
# Module logger.
_LOGGER = logging.getLogger(__name__)

# Configuration key for this integration.
DOMAIN = "shiftr"
# Hostname of the public shiftr.io MQTT broker.
SHIFTR_BROKER = "broker.shiftr.io"

# Configuration schema: shiftr.io credentials are required under `shiftr:`.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_USERNAME): cv.string,
                vol.Required(CONF_PASSWORD): cv.string,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
    """Initialize the Shiftr.io MQTT consumer.

    Opens one MQTT connection to the public shiftr.io broker and forwards
    every Home Assistant state change to it.  Returns True on success.
    """
    conf = config[DOMAIN]
    username = conf.get(CONF_USERNAME)
    password = conf.get(CONF_PASSWORD)

    client_id = "HomeAssistant"
    port = 1883
    keepalive = 600  # seconds between MQTT keep-alive pings

    mqttc = mqtt.Client(client_id, protocol=mqtt.MQTTv311)
    mqttc.username_pw_set(username, password=password)
    mqttc.connect(SHIFTR_BROKER, port=port, keepalive=keepalive)

    def stop_shiftr(event):
        """Stop the Shiftr.io MQTT component."""
        mqttc.disconnect()

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_shiftr)

    def shiftr_event_listener(event):
        """Listen for new messages on the bus and sends them to Shiftr.io."""
        state = event.data.get("new_state")
        if state is None:
            # Fix: state_changed events fire without a new_state when an
            # entity is removed; state.entity_id would raise AttributeError.
            return
        topic = state.entity_id.replace(".", "/")

        try:
            _state = state_helper.state_as_number(state)
        except ValueError:
            # Non-numeric states are forwarded as their raw string value.
            _state = state.state

        try:
            mqttc.publish(topic, _state, qos=0, retain=False)

            if state.attributes:
                for attribute, data in state.attributes.items():
                    mqttc.publish(
                        f"/{topic}/{attribute}", str(data), qos=0, retain=False
                    )
        except RuntimeError:
            # NOTE(review): presumably guards against publish failures on a
            # dead client so one bad update cannot break the listener —
            # confirm against paho-mqtt's error behavior.
            pass

    hass.bus.listen(EVENT_STATE_CHANGED, shiftr_event_listener)

    return True
| 26.151899 | 79 | 0.636012 | import logging
import paho.mqtt.client as mqtt
import voluptuous as vol
from homeassistant.const import (
CONF_PASSWORD,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
EVENT_STATE_CHANGED,
)
from homeassistant.helpers import state as state_helper
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = "shiftr"
SHIFTR_BROKER = "broker.shiftr.io"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
conf = config[DOMAIN]
username = conf.get(CONF_USERNAME)
password = conf.get(CONF_PASSWORD)
client_id = "HomeAssistant"
port = 1883
keepalive = 600
mqttc = mqtt.Client(client_id, protocol=mqtt.MQTTv311)
mqttc.username_pw_set(username, password=password)
mqttc.connect(SHIFTR_BROKER, port=port, keepalive=keepalive)
def stop_shiftr(event):
mqttc.disconnect()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_shiftr)
def shiftr_event_listener(event):
state = event.data.get("new_state")
topic = state.entity_id.replace(".", "/")
try:
_state = state_helper.state_as_number(state)
except ValueError:
_state = state.state
try:
mqttc.publish(topic, _state, qos=0, retain=False)
if state.attributes:
for attribute, data in state.attributes.items():
mqttc.publish(
f"/{topic}/{attribute}", str(data), qos=0, retain=False
)
except RuntimeError:
pass
hass.bus.listen(EVENT_STATE_CHANGED, shiftr_event_listener)
return True
| true | true |
1c3cde305aec7ba5054cd2603f156a38fb90a6a4 | 13,576 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_endpoint_control_forticlient_registration_sync.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_endpoint_control_forticlient_registration_sync.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/fortinet/fortios/plugins/modules/fortios_endpoint_control_forticlient_registration_sync.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_endpoint_control_forticlient_registration_sync
short_description: Configure FortiClient registration synchronization settings in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify endpoint_control feature and forticlient_registration_sync category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
member_path:
type: str
description:
- Member attribute path to operate on.
- Delimited by a slash character if there are more than one attribute.
- Parameter marked with member_path is legitimate for doing member operation.
member_state:
type: str
description:
- Add or delete a member under specified attribute path.
- When member_state is specified, the state option is ignored.
choices:
- present
- absent
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
endpoint_control_forticlient_registration_sync:
description:
- Configure FortiClient registration synchronization settings.
default: null
type: dict
suboptions:
peer_ip:
description:
- IP address of the peer FortiGate for endpoint license synchronization.
type: str
peer_name:
description:
- Peer name.
type: str
'''
EXAMPLES = '''
- collections:
- fortinet.fortios
connection: httpapi
hosts: fortigate01
vars:
ansible_httpapi_port: 443
ansible_httpapi_use_ssl: true
ansible_httpapi_validate_certs: false
vdom: root
tasks:
- name: fortios_endpoint_control_forticlient_registration_sync
fortios_endpoint_control_forticlient_registration_sync:
vdom: root
state: present
endpoint_control_forticlient_registration_sync:
peer_ip: 1.1.1.1
peer_name: '1'
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import is_same_comparison
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import serialize
def filter_endpoint_control_forticlient_registration_sync_data(json):
    """Project the task body onto the module's known options.

    Keys outside ``option_list`` and keys whose value is ``None`` (unset)
    are dropped, so only real user-supplied settings reach the device.
    """
    option_list = ['peer_ip', 'peer_name']
    return {
        option: json[option]
        for option in option_list
        if option in json and json[option] is not None
    }
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from ``foo_bar`` to ``foo-bar``.

    FortiOS endpoints use hyphenated attribute names.  Dicts are rebuilt
    with converted keys; lists are converted element-wise in place; any
    other value is returned unchanged.
    """
    if isinstance(data, dict):
        return {
            key.replace('_', '-'): underscore_to_hyphen(value)
            for key, value in data.items()
        }
    if isinstance(data, list):
        for index in range(len(data)):
            data[index] = underscore_to_hyphen(data[index])
    return data
def endpoint_control_forticlient_registration_sync(data, fos, check_mode=False):
    """Apply (or, in check mode, diff) the FortiClient registration-sync config.

    In check mode returns a ``(is_error, would_change, data)`` triple without
    touching the device; otherwise returns the raw ``fos.set``/``fos.delete``
    response dict.
    """
    vdom = data['vdom']
    state = data['state']
    endpoint_control_forticlient_registration_sync_data = data['endpoint_control_forticlient_registration_sync']
    # Drop unset options, then convert snake_case keys to the API's kebab-case.
    filtered_data = underscore_to_hyphen(filter_endpoint_control_forticlient_registration_sync_data(endpoint_control_forticlient_registration_sync_data))
    # check_mode: dry run — compare desired vs. current state, change nothing.
    if check_mode:
        mkey = fos.get_mkey('endpoint_control', 'forticlient_registration_sync', filtered_data, vdom=vdom)
        current_data = fos.get('endpoint_control', 'forticlient_registration_sync', vdom=vdom, mkey=mkey)
        is_existed = current_data and current_data.get('http_status') == 200 \
            and isinstance(current_data.get('results'), list) \
            and len(current_data['results']) > 0
        # If the record exists and state is 'present', compare current settings
        # with the desired ones to decide whether a change would happen.
        if state == 'present' or state is True:
            if mkey is None:
                return False, True, filtered_data
            # mkey exists: report "changed" only when current != desired.
            if is_existed:
                is_same = is_same_comparison(
                    serialize(current_data['results'][0]), serialize(filtered_data))
                return False, not is_same, filtered_data
            # record does not exist yet, so it would be created
            return False, True, filtered_data
        if state == 'absent':
            if mkey is None:
                return False, False, filtered_data
            if is_existed:
                return False, True, filtered_data
            return False, False, filtered_data
        # Neither 'present' nor 'absent' — report an error triple.
        return True, False, {'reason: ': 'Must provide state parameter'}
    if state == "present" or state is True:
        return fos.set('endpoint-control',
                       'forticlient-registration-sync',
                       data=filtered_data,
                       vdom=vdom)
    elif state == "absent":
        return fos.delete('endpoint-control',
                          'forticlient-registration-sync',
                          mkey=filtered_data['peer-name'],
                          vdom=vdom)
    else:
        fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(resp):
    """Return True if a FortiGate API response indicates success.

    Success is any of:
      * an explicit ``status`` of ``'success'``,
      * an HTTP 200, or
      * a DELETE that returned 404 (object already absent counts as success).
    """
    # Fix: the original read resp['http_status'] unguarded in the DELETE
    # clause, raising KeyError when the key was missing; .get() returns
    # False for that case instead.
    return ('status' in resp and resp['status'] == 'success') \
        or ('http_status' in resp and resp['http_status'] == 200) \
        or ('http_method' in resp and resp['http_method'] == "DELETE"
            and resp.get('http_status') == 404)
def fortios_endpoint_control(data, fos, check_mode):
    """Dispatch the endpoint_control task and normalize its result.

    Returns the ``(is_error, has_changed, response)`` triple Ansible expects,
    or the check-mode triple unchanged when ``check_mode`` is set.
    """
    fos.do_member_operation('endpoint_control_forticlient_registration_sync')
    if data['endpoint_control_forticlient_registration_sync']:
        resp = endpoint_control_forticlient_registration_sync(data, fos, check_mode)
    else:
        fos._module.fail_json(msg='missing task body: %s' % ('endpoint_control_forticlient_registration_sync'))
    if check_mode:
        return resp
    # changed is True only on success, and only if the device reports a
    # revision change (absent key is treated as "changed").
    return not is_successful_status(resp), \
        is_successful_status(resp) and \
        (resp['revision_changed'] if 'revision_changed' in resp else True), \
        resp
# Per-option schema with the FortiOS firmware revisions that support each
# option; schema_to_module_spec() turns this into the Ansible argument spec.
versioned_schema = {
    "type": "list",
    "children": {
        "peer_ip": {
            "type": "string",
            "revisions": {
                "v6.0.11": True,
                "v6.0.0": True,
                "v6.0.5": True
            }
        },
        "peer_name": {
            "type": "string",
            "revisions": {
                "v6.0.11": True,
                "v6.0.0": True,
                "v6.0.5": True
            }
        }
    },
    "revisions": {
        "v6.0.11": True,
        "v6.0.0": True,
        "v6.0.5": True
    }
}
def main():
    """Module entry point: build the argument spec, connect over httpapi and
    apply the endpoint_control_forticlient_registration_sync configuration."""
    module_spec = schema_to_module_spec(versioned_schema)
    mkeyname = 'peer-name'
    fields = {
        "access_token": {"required": False, "type": "str", "no_log": True},
        # Fix: was the callable ``bool`` — bool("false") is True, so string
        # values from playbooks coerced wrongly; "bool" uses Ansible's parser.
        "enable_log": {"required": False, "type": "bool"},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "member_path": {"required": False, "type": "str"},
        "member_state": {
            "type": "str",
            "required": False,
            "choices": ["present", "absent"]
        },
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "endpoint_control_forticlient_registration_sync": {
            "required": False, "type": "dict", "default": None,
            "options": {
            }
        }
    }
    # Copy the version-filtered option schema into the task-body spec; the
    # table's master key (peer-name) becomes mandatory.
    for attribute_name in module_spec['options']:
        fields["endpoint_control_forticlient_registration_sync"]['options'][attribute_name] = module_spec['options'][attribute_name]
        if mkeyname and mkeyname == attribute_name:
            fields["endpoint_control_forticlient_registration_sync"]['options'][attribute_name]['required'] = True
    check_legacy_fortiosapi()
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=True)
    versions_check_result = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        if 'access_token' in module.params:
            connection.set_option('access_token', module.params['access_token'])
        if 'enable_log' in module.params:
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        versions_check_result = check_schema_versioning(fos, versioned_schema, "endpoint_control_forticlient_registration_sync")
        is_error, has_changed, result = fortios_endpoint_control(module.params, fos, module.check_mode)
    else:
        module.fail_json(**FAIL_SOCKET_MSG)
    if versions_check_result and versions_check_result['matched'] is False:
        # Fixed typo in the user-facing warning: "FortOS" -> "FortiOS".
        module.warn("Ansible has detected version mismatch between FortiOS system and your playbook, see more details by specifying option -vvv")
    if not is_error:
        if versions_check_result and versions_check_result['matched'] is False:
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
        else:
            module.exit_json(changed=has_changed, meta=result)
    else:
        if versions_check_result and versions_check_result['matched'] is False:
            module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
        else:
            module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
    main()
| 34.899743 | 153 | 0.660283 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_endpoint_control_forticlient_registration_sync
short_description: Configure FortiClient registration synchronization settings in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify endpoint_control feature and forticlient_registration_sync category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.0
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
member_path:
type: str
description:
- Member attribute path to operate on.
- Delimited by a slash character if there are more than one attribute.
- Parameter marked with member_path is legitimate for doing member operation.
member_state:
type: str
description:
- Add or delete a member under specified attribute path.
- When member_state is specified, the state option is ignored.
choices:
- present
- absent
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
endpoint_control_forticlient_registration_sync:
description:
- Configure FortiClient registration synchronization settings.
default: null
type: dict
suboptions:
peer_ip:
description:
- IP address of the peer FortiGate for endpoint license synchronization.
type: str
peer_name:
description:
- Peer name.
type: str
'''
EXAMPLES = '''
- collections:
- fortinet.fortios
connection: httpapi
hosts: fortigate01
vars:
ansible_httpapi_port: 443
ansible_httpapi_use_ssl: true
ansible_httpapi_validate_certs: false
vdom: root
tasks:
- name: fortios_endpoint_control_forticlient_registration_sync
fortios_endpoint_control_forticlient_registration_sync:
vdom: root
state: present
endpoint_control_forticlient_registration_sync:
peer_ip: 1.1.1.1
peer_name: '1'
'''
RETURN = '''
build:
  description: Build number of the FortiGate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import schema_to_module_spec
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_schema_versioning
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import is_same_comparison
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.comparison import serialize
def filter_endpoint_control_forticlient_registration_sync_data(json):
    """Keep only the module's known options, dropping unset (None) values."""
    option_list = ['peer_ip', 'peer_name']
    dictionary = {}
    for attribute in option_list:
        if attribute in json and json[attribute] is not None:
            dictionary[attribute] = json[attribute]
    return dictionary
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from foo_bar to the API's foo-bar form.

    Lists are converted in place; dicts are rebuilt; scalars pass through.
    """
    if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    return data
def endpoint_control_forticlient_registration_sync(data, fos, check_mode=False):
    """Apply (or, in check mode, diff) the FortiClient registration-sync config.

    In check mode returns an ``(is_error, would_change, data)`` triple without
    touching the device; otherwise returns the raw API response dict.
    """
    vdom = data['vdom']
    state = data['state']
    endpoint_control_forticlient_registration_sync_data = data['endpoint_control_forticlient_registration_sync']
    # Drop unset options, then convert snake_case keys to kebab-case.
    filtered_data = underscore_to_hyphen(filter_endpoint_control_forticlient_registration_sync_data(endpoint_control_forticlient_registration_sync_data))
    # Dry run: compare desired vs. current state, change nothing.
    if check_mode:
        mkey = fos.get_mkey('endpoint_control', 'forticlient_registration_sync', filtered_data, vdom=vdom)
        current_data = fos.get('endpoint_control', 'forticlient_registration_sync', vdom=vdom, mkey=mkey)
        is_existed = current_data and current_data.get('http_status') == 200 \
            and isinstance(current_data.get('results'), list) \
            and len(current_data['results']) > 0
        if state == 'present' or state is True:
            if mkey is None:
                return False, True, filtered_data
            # The record exists: "changed" only when current != desired.
            if is_existed:
                is_same = is_same_comparison(
                    serialize(current_data['results'][0]), serialize(filtered_data))
                return False, not is_same, filtered_data
            # record does not exist, so it would be created
            return False, True, filtered_data
        if state == 'absent':
            if mkey is None:
                return False, False, filtered_data
            if is_existed:
                return False, True, filtered_data
            return False, False, filtered_data
        return True, False, {'reason: ': 'Must provide state parameter'}
    if state == "present" or state is True:
        return fos.set('endpoint-control',
                       'forticlient-registration-sync',
                       data=filtered_data,
                       vdom=vdom)
    elif state == "absent":
        return fos.delete('endpoint-control',
                          'forticlient-registration-sync',
                          mkey=filtered_data['peer-name'],
                          vdom=vdom)
    else:
        fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(resp):
    """Return truthy if the FortiGate response indicates success."""
    # NOTE(review): the DELETE clause reads resp['http_status'] without a
    # membership check first — a DELETE response lacking that key raises
    # KeyError instead of returning False; resp.get('http_status') would fix it.
    return 'status' in resp and resp['status'] == 'success' or \
        'http_status' in resp and resp['http_status'] == 200 or \
        'http_method' in resp and resp['http_method'] == "DELETE" and resp['http_status'] == 404
def fortios_endpoint_control(data, fos, check_mode):
    """Dispatch the endpoint_control task and normalize the result triple."""
    fos.do_member_operation('endpoint_control_forticlient_registration_sync')
    if data['endpoint_control_forticlient_registration_sync']:
        resp = endpoint_control_forticlient_registration_sync(data, fos, check_mode)
    else:
        fos._module.fail_json(msg='missing task body: %s' % ('endpoint_control_forticlient_registration_sync'))
    if check_mode:
        return resp
    # (is_error, has_changed, response); a missing revision_changed key is
    # treated as "changed".
    return not is_successful_status(resp), \
        is_successful_status(resp) and \
        (resp['revision_changed'] if 'revision_changed' in resp else True), \
        resp
# Per-option schema with the FortiOS firmware revisions supporting each option.
versioned_schema = {
    "type": "list",
    "children": {
        "peer_ip": {
            "type": "string",
            "revisions": {
                "v6.0.11": True,
                "v6.0.0": True,
                "v6.0.5": True
            }
        },
        "peer_name": {
            "type": "string",
            "revisions": {
                "v6.0.11": True,
                "v6.0.0": True,
                "v6.0.5": True
            }
        }
    },
    "revisions": {
        "v6.0.11": True,
        "v6.0.0": True,
        "v6.0.5": True
    }
}
def main():
    """Module entry point: build the argument spec, connect, apply config."""
    module_spec = schema_to_module_spec(versioned_schema)
    mkeyname = 'peer-name'
    fields = {
        "access_token": {"required": False, "type": "str", "no_log": True},
        # NOTE(review): ``bool`` here is the builtin callable, not "bool" —
        # bool("false") is True, so string values coerce incorrectly.
        "enable_log": {"required": False, "type": bool},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "member_path": {"required": False, "type": "str"},
        "member_state": {
            "type": "str",
            "required": False,
            "choices": ["present", "absent"]
        },
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "endpoint_control_forticlient_registration_sync": {
            "required": False, "type": "dict", "default": None,
            "options": {
            }
        }
    }
    # Copy the version-filtered option schema into the task-body spec; the
    # table's master key (peer-name) becomes mandatory.
    for attribute_name in module_spec['options']:
        fields["endpoint_control_forticlient_registration_sync"]['options'][attribute_name] = module_spec['options'][attribute_name]
        if mkeyname and mkeyname == attribute_name:
            fields["endpoint_control_forticlient_registration_sync"]['options'][attribute_name]['required'] = True
    check_legacy_fortiosapi()
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=True)
    versions_check_result = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        if 'access_token' in module.params:
            connection.set_option('access_token', module.params['access_token'])
        if 'enable_log' in module.params:
            connection.set_option('enable_log', module.params['enable_log'])
        else:
            connection.set_option('enable_log', False)
        fos = FortiOSHandler(connection, module, mkeyname)
        versions_check_result = check_schema_versioning(fos, versioned_schema, "endpoint_control_forticlient_registration_sync")
        is_error, has_changed, result = fortios_endpoint_control(module.params, fos, module.check_mode)
    else:
        module.fail_json(**FAIL_SOCKET_MSG)
    if versions_check_result and versions_check_result['matched'] is False:
        # NOTE(review): "FortOS" in the message below should read "FortiOS".
        module.warn("Ansible has detected version mismatch between FortOS system and your playbook, see more details by specifying option -vvv")
    if not is_error:
        if versions_check_result and versions_check_result['matched'] is False:
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
        else:
            module.exit_json(changed=has_changed, meta=result)
    else:
        if versions_check_result and versions_check_result['matched'] is False:
            module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
        else:
            module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
    main()
| true | true |
1c3cdf8dd7a023951b80190dd8425c6b825b6401 | 901 | py | Python | chewie/utils.py | snak1219/chewie | 346841e1ba16324ff302df9b67897721be112f0c | [
"Apache-2.0"
] | 1 | 2021-05-23T14:47:53.000Z | 2021-05-23T14:47:53.000Z | chewie/utils.py | snak1219/chewie | 346841e1ba16324ff302df9b67897721be112f0c | [
"Apache-2.0"
] | null | null | null | chewie/utils.py | snak1219/chewie | 346841e1ba16324ff302df9b67897721be112f0c | [
"Apache-2.0"
] | 2 | 2021-02-27T09:46:02.000Z | 2021-08-06T03:12:20.000Z | """Utility Functions"""
import logging
from collections import namedtuple # pytype: disable=pyi-error
def get_logger(logname):
    """Return the named logger from the logging module's registry."""
    return logging.getLogger(logname)
def log_method(method):
    """Decorator that logs entry into *method* via ``self.logger``.

    Improvements over the original: lazy ``%s`` argument formatting (the
    message is only built when INFO is enabled) and ``functools.wraps`` so
    the wrapped method keeps its name and docstring.
    """
    from functools import wraps

    @wraps(method)
    def wrapped(self, *args, **kwargs):
        """Log the call, then delegate to the wrapped method."""
        self.logger.info('Entering %s', method.__name__)
        return method(self, *args, **kwargs)
    return wrapped
class MessageParseError(Exception):
    """Error for when parsing cannot be successfully completed."""
    pass
class EapQueueMessage(namedtuple('EapQueueMessage',
                                 'message src_mac port_mac')):
    """EAP message queued for transmission, tagged with source/port MACs."""
    pass
class RadiusQueueMessage(namedtuple('RadiusQueueMessage',
                                    'message src_mac identity state port_mac')):
    """RADIUS message queued for transmission, with identity and state."""
    pass
| 25.027778 | 80 | 0.651498 | import logging
from collections import namedtuple
def get_logger(logname):
    """Create and return a logger object."""
    logger = logging.getLogger(logname)
    return logger
def log_method(method):
    """Decorator that logs entry into *method* via ``self.logger``."""
    def wrapped(self, *args, **kwargs):
        # NOTE(review): eager %-formatting; logger.info('Entering %s',
        # method.__name__) would defer formatting until INFO is enabled.
        self.logger.info('Entering %s' % method.__name__)
        return method(self, *args, **kwargs)
    return wrapped
class MessageParseError(Exception):
    """Error raised when message parsing cannot be completed."""
    pass
class EapQueueMessage(namedtuple('EapQueueMessage',
                                 'message src_mac port_mac')):
    """EAP message queued for transmission, tagged with source/port MACs."""
    pass
class RadiusQueueMessage(namedtuple('RadiusQueueMessage',
                                    'message src_mac identity state port_mac')):
    """RADIUS message queued for transmission, with identity and state."""
    pass
| true | true |
1c3ce019f9026307ee581c24e1136c8bc162b37f | 196 | py | Python | dufi/commands/cmd_excel/excellib/utils.py | Shura1oplot/dufi | c9c25524020e57d3670c298acca305900b6490e7 | [
"MIT"
] | null | null | null | dufi/commands/cmd_excel/excellib/utils.py | Shura1oplot/dufi | c9c25524020e57d3670c298acca305900b6490e7 | [
"MIT"
] | null | null | null | dufi/commands/cmd_excel/excellib/utils.py | Shura1oplot/dufi | c9c25524020e57d3670c298acca305900b6490e7 | [
"MIT"
] | null | null | null | # [SublimeLinter @python:3]
from xlsxwriter.utility import xl_cell_to_rowcol
def colno(s):
    """Convert a spreadsheet column label (e.g. "A", "aa") to a 0-based index.

    "A" -> 0, "Z" -> 25, "AA" -> 26, "XFD" -> 16383.  Computed directly as a
    bijective base-26 value, so no xlsxwriter round-trip through a fake cell
    reference is needed.

    Raises:
        ValueError: if *s* contains anything but letters.
    """
    if not s.isalpha():
        raise ValueError(s)
    number = 0
    for char in s.upper():
        number = number * 26 + (ord(char) - ord('A') + 1)
    return number - 1
| 17.818182 | 48 | 0.678571 |
from xlsxwriter.utility import xl_cell_to_rowcol
def colno(s):
    """Convert a spreadsheet column label (e.g. "A", "aa") to a 0-based index.

    "A" -> 0, "Z" -> 25, "AA" -> 26, "XFD" -> 16383.  Computed directly as a
    bijective base-26 value, avoiding the xlsxwriter round-trip through a
    synthetic cell reference.

    Raises:
        ValueError: if *s* contains anything but letters.
    """
    if not s.isalpha():
        raise ValueError(s)
    number = 0
    for char in s.upper():
        number = number * 26 + (ord(char) - ord('A') + 1)
    return number - 1
| true | true |
1c3ce01b7d0000dbb7203ee5e161d61b9a3d04e2 | 2,274 | py | Python | verilog/benchmarks_small/mux/common.py | cliffordwolf/yosys-benchmarks | 52ff6fa991f2ab509618d8aaad02f307aac78848 | [
"0BSD"
] | 14 | 2018-10-08T05:08:54.000Z | 2022-01-29T23:12:20.000Z | verilog/benchmarks_small/mux/common.py | cliffordwolf/yosys-benchmarks | 52ff6fa991f2ab509618d8aaad02f307aac78848 | [
"0BSD"
] | 3 | 2019-02-27T15:16:50.000Z | 2020-02-15T16:15:43.000Z | verilog/benchmarks_small/mux/common.py | cliffordwolf/yosys-benchmarks | 52ff6fa991f2ab509618d8aaad02f307aac78848 | [
"0BSD"
] | 6 | 2019-02-04T20:16:49.000Z | 2021-02-05T03:29:29.000Z | from math import log2, ceil
def gen_mux_index(N, W):
    """Write mux_index_<N>_<W>.v: an N-to-1, W-bit mux using a part-select."""
    text = (
        "\n(* top *)\n"
        "module mux_index_{0}_{1} #(parameter N={0}, parameter W={1}) "
        "(input [N*W-1:0] i, input [$clog2(N)-1:0] s, output [W-1:0] o);\n"
        "assign o = i[s*W+:W];\n"
        "endmodule\n"
        "\n"
    ).format(N, W)
    with open("mux_index_%d_%d.v" % (N, W), "w") as f:
        f.write(text)
def gen_mux_case(N,W):
    """Write mux_case_<N>_<W>.v: an N-to-1, W-bit mux as a case statement."""
    with open("mux_case_%d_%d.v" % (N,W), "w") as f:
        print("""
(* top *)
module mux_case_{0}_{1} #(parameter N={0}, parameter W={1}) (input [N*W-1:0] i, input [$clog2(N)-1:0] s, output reg [W-1:0] o);
always @*
    case (s)""".format(N,W), file=f)
        # One case arm per input word; out-of-range selects drive 'x.
        for i in range( N):
            print("        {0}: o <= i[{0}*W+:W];".format(i), file=f)
        print("""        default: o <= {W{1'bx}};
    endcase
endmodule
""", file=f)
def gen_mux_if_unbal(N,W):
    """Write mux_if_unbal_<N>_<W>.v: an N-to-1 mux as an unbalanced if chain."""
    with open("mux_if_unbal_%d_%d.v" % (N,W), "w") as f:
        print("""
(* top *)
module mux_if_unbal_{0}_{1} #(parameter N={0}, parameter W={1}) (input [N*W-1:0] i, input [$clog2(N)-1:0] s, output reg [W-1:0] o);
always @*""".format(N,W), file=f)
        print("    if (s == 0) o <= i[0*W+:W];", file=f)
        # One "else if" per remaining input; out-of-range selects drive 'x.
        for i in range(1,N):
            print("    else if (s == {0}) o <= i[{0}*W+:W];".format(i), file=f)
        print("    else o <= {W{1'bx}};", file=f)
        print("""
endmodule
""", file=f)
def _gen_mux_if_bal_rec(f, N, depth):
    """Recursively print a balanced if/else tree selecting among indices *N*.

    *N* is a list of input indices (``None`` entries are padding up to a
    power of two); *depth* is both the recursion depth and the select bit
    tested at this level.
    """
    indent = ' ' * depth
    if len(N) == 1:
        print("    {0}o <= i[{1}*W+:W];".format(indent, N[0]), file=f)
    else:
        print("    {0}if (s[{1}] == 1'b0)".format(indent, depth), file=f)
        # Split the candidates in half (power-of-two boundary).
        i = ceil(log2(len(N))) - 1
        _gen_mux_if_bal_rec(f, N[:2**i], depth+1)
        # Emit the else branch only if it selects at least one real input.
        if N[2**i:] != [None]*len(N[2**i:]):
            print("    {0}else".format(indent), file=f)
            _gen_mux_if_bal_rec(f, N[2**i:], depth+1)
def gen_mux_if_bal(N,W):
    """Write mux_if_bal_<N>_<W>.v: an N-to-1, W-bit mux as a balanced if tree."""
    with open("mux_if_bal_%d_%d.v" % (N,W), "w") as f:
        print("""
(* top *)
module mux_if_bal_{0}_{1} #(parameter N={0}, parameter W={1}) (input [N*W-1:0] i, input [$clog2(N)-1:0] s, output reg [W-1:0] o);
always @* begin""".format(N,W), file=f)
        pad = (2 ** int(ceil(log2(N)))) - N
        # Bug fix: this string is not passed through str.format(), so the
        # doubled braces were written literally ("{{W{{1'bx}}}}"), which is
        # invalid Verilog; the default must be {W{1'bx}} as in the other
        # generators.
        print("    o <= {W{1'bx}};", file=f)
        _gen_mux_if_bal_rec(f, list(range(N)) + [None]*pad, 0)
        print("""end
endmodule
""", file=f)
| 35.53125 | 131 | 0.506157 | from math import log2, ceil
def gen_mux_index(N,W):
    """Write mux_index_<N>_<W>.v: an N-to-1, W-bit mux using a part-select."""
    with open("mux_index_%d_%d.v" % (N,W), "w") as f:
        print("""
(* top *)
module mux_index_{0}_{1} #(parameter N={0}, parameter W={1}) (input [N*W-1:0] i, input [$clog2(N)-1:0] s, output [W-1:0] o);
assign o = i[s*W+:W];
endmodule
""".format(N,W), file=f)
def gen_mux_case(N,W):
    """Write mux_case_<N>_<W>.v: an N-to-1, W-bit mux as a case statement."""
    with open("mux_case_%d_%d.v" % (N,W), "w") as f:
        print("""
(* top *)
module mux_case_{0}_{1} #(parameter N={0}, parameter W={1}) (input [N*W-1:0] i, input [$clog2(N)-1:0] s, output reg [W-1:0] o);
always @*
    case (s)""".format(N,W), file=f)
        # One case arm per input word.
        for i in range( N):
            print("        {0}: o <= i[{0}*W+:W];".format(i), file=f)
        print("""        default: o <= {W{1'bx}};
    endcase
endmodule
""", file=f)
def gen_mux_if_unbal(N,W):
    """Write mux_if_unbal_<N>_<W>.v: an N-to-1 mux as an unbalanced if chain."""
    with open("mux_if_unbal_%d_%d.v" % (N,W), "w") as f:
        print("""
(* top *)
module mux_if_unbal_{0}_{1} #(parameter N={0}, parameter W={1}) (input [N*W-1:0] i, input [$clog2(N)-1:0] s, output reg [W-1:0] o);
always @*""".format(N,W), file=f)
        print("    if (s == 0) o <= i[0*W+:W];", file=f)
        # One "else if" per remaining input; out-of-range selects drive 'x.
        for i in range(1,N):
            print("    else if (s == {0}) o <= i[{0}*W+:W];".format(i), file=f)
        print("    else o <= {W{1'bx}};", file=f)
        print("""
endmodule
""", file=f)
def _gen_mux_if_bal_rec(f, N, depth):
    """Recursively print a balanced if/else tree selecting among indices *N*.

    *N* is a list of input indices (``None`` entries are padding up to a
    power of two); *depth* is both the recursion depth and the select bit
    tested at this level.
    """
    indent = ' ' * depth
    if len(N) == 1:
        print("    {0}o <= i[{1}*W+:W];".format(indent, N[0]), file=f)
    else:
        print("    {0}if (s[{1}] == 1'b0)".format(indent, depth), file=f)
        # Split the candidates in half (power-of-two boundary).
        i = ceil(log2(len(N))) - 1
        _gen_mux_if_bal_rec(f, N[:2**i], depth+1)
        # Emit the else branch only if it selects at least one real input.
        if N[2**i:] != [None]*len(N[2**i:]):
            print("    {0}else".format(indent), file=f)
            _gen_mux_if_bal_rec(f, N[2**i:], depth+1)
def gen_mux_if_bal(N,W):
    """Write mux_if_bal_<N>_<W>.v: an N-to-1, W-bit mux as a balanced if tree."""
    with open("mux_if_bal_%d_%d.v" % (N,W), "w") as f:
        print("""
(* top *)
module mux_if_bal_{0}_{1} #(parameter N={0}, parameter W={1}) (input [N*W-1:0] i, input [$clog2(N)-1:0] s, output reg [W-1:0] o);
always @* begin""".format(N,W), file=f)
        pad = (2 ** int(ceil(log2(N)))) - N
        # Bug fix: this string is not passed through str.format(), so the
        # doubled braces were written literally ("{{W{{1'bx}}}}"), which is
        # invalid Verilog; the default must be {W{1'bx}} as in the other
        # generators.
        print("    o <= {W{1'bx}};", file=f)
        _gen_mux_if_bal_rec(f, list(range(N)) + [None]*pad, 0)
        print("""end
endmodule
""", file=f)
| true | true |
1c3ce045d5ce5d1cc57bbfbf2b4b70a1dd582faf | 258 | py | Python | ppcd/models/layers/__init__.py | geoyee/PdRSCD | 4a1a7256320f006c15e3e5b5b238fdfba8198853 | [
"Apache-2.0"
] | 44 | 2021-04-21T02:41:55.000Z | 2022-03-09T03:01:16.000Z | ppcd/models/layers/__init__.py | MinZHANG-WHU/PdRSCD | 612976225201d78adc7ff99529ada17b41fedc5d | [
"Apache-2.0"
] | 2 | 2021-09-30T07:52:47.000Z | 2022-02-12T09:05:35.000Z | ppcd/models/layers/__init__.py | MinZHANG-WHU/PdRSCD | 612976225201d78adc7ff99529ada17b41fedc5d | [
"Apache-2.0"
] | 6 | 2021-07-23T02:18:39.000Z | 2022-01-14T01:15:50.000Z | from .layer_libs import ConvBN, ConvBNReLU, SeparableConvBNReLU, AuxLayer, SyncBatchNorm
from .pyramid_pool import PPModule
from .initialize import kaiming_normal_init, constant_init, normal_init
from .attention import CAM, SAM, BAM, PAM, GatedAttentionLayer | 64.5 | 88 | 0.848837 | from .layer_libs import ConvBN, ConvBNReLU, SeparableConvBNReLU, AuxLayer, SyncBatchNorm
from .pyramid_pool import PPModule
from .initialize import kaiming_normal_init, constant_init, normal_init
from .attention import CAM, SAM, BAM, PAM, GatedAttentionLayer | true | true |
1c3ce17ffa07012243e950028e51b29ef820183c | 98 | py | Python | conf.py | braxtons12/C2nxt | be30f17a321ae2e433ef11a09e82acbbabdae944 | [
"MIT"
] | 3 | 2021-08-15T23:45:26.000Z | 2022-01-03T04:14:32.000Z | conf.py | braxtons12/C2nxt | be30f17a321ae2e433ef11a09e82acbbabdae944 | [
"MIT"
] | null | null | null | conf.py | braxtons12/C2nxt | be30f17a321ae2e433ef11a09e82acbbabdae944 | [
"MIT"
] | null | null | null | LINKS_NAVBAR1 = [
("Modules", 'modules', [])
]
LINKS_NAVBAR2 = [
("Classes", 'annotated', [])
]
| 14 | 29 | 0.571429 | LINKS_NAVBAR1 = [
("Modules", 'modules', [])
]
LINKS_NAVBAR2 = [
("Classes", 'annotated', [])
]
| true | true |
1c3ce1b7fa31ef9e108dfe8a9f820c3bc1098564 | 2,323 | py | Python | fiftyone/core/media.py | callmekofi/fiftyone | 261e35d07d2546c6bf77f7be98dde5dab415d01d | [
"Apache-2.0"
] | 3 | 2022-01-18T06:13:33.000Z | 2022-02-14T13:28:23.000Z | fiftyone/core/media.py | callmekofi/fiftyone | 261e35d07d2546c6bf77f7be98dde5dab415d01d | [
"Apache-2.0"
] | null | null | null | fiftyone/core/media.py | callmekofi/fiftyone | 261e35d07d2546c6bf77f7be98dde5dab415d01d | [
"Apache-2.0"
] | null | null | null | """
Sample media utilities.
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import multiprocessing
import eta.core.utils as etau
import eta.core.video as etav
import fiftyone.core.utils as fou
# Valid media types
# @todo convert to a MediaType enum class?
VIDEO = "video"
IMAGE = "image"
MEDIA_TYPES = {IMAGE, VIDEO}
def get_media_type(filepath):
    """Gets the media type for the given filepath.

    Args:
        filepath: a filepath

    Returns:
        the media type: ``VIDEO`` if the path has a video MIME type,
        else ``IMAGE``
    """
    # @todo use `etav.is_supported_video_file` instead?
    # NOTE(review): assumes etav.is_video_mime_type() decides from the path's
    # MIME type without reading the file — confirm against eta docs.
    if etav.is_video_mime_type(filepath):
        return VIDEO
    # @todo don't assume all non-video samples are images!
    return IMAGE
def export_media(inpaths, outpaths, mode="copy", num_workers=None):
    """Exports the media at the given input paths to the given output paths.

    ``inpaths`` and ``outpaths`` are zipped pairwise, so they should have the
    same length; work is distributed over a process pool.

    Args:
        inpaths: the list of input paths
        outpaths: the list of output paths
        mode ("copy"): the export mode to use. Supported values are
            ``("copy", "move", "symlink")``
        num_workers (None): the number of processes to use. By default,
            ``multiprocessing.cpu_count()`` is used

    Raises:
        ValueError: if ``mode`` is not a supported value
    """
    num_files = len(inpaths)
    if num_files == 0:
        return
    if num_workers is None:
        num_workers = multiprocessing.cpu_count()
    inputs = list(zip(inpaths, outpaths))
    # Select the per-file worker matching the requested mode.
    if mode == "copy":
        op = _do_copy_file
    elif mode == "move":
        op = _do_move_file
    elif mode == "symlink":
        op = _do_symlink_file
    else:
        raise ValueError(
            "Unsupported mode '%s'. Supported values are %s"
            % (mode, ("copy", "move", "symlink"))
        )
    # imap_unordered: completion order does not matter, only the progress bar.
    with fou.ProgressBar(total=num_files, iters_str="files") as pb:
        with multiprocessing.Pool(processes=num_workers) as pool:
            for _ in pb(pool.imap_unordered(op, inputs)):
                pass
def _do_move_file(args):
    """Pool worker: move a single ``(inpath, outpath)`` pair."""
    inpath, outpath = args
    etau.move_file(inpath, outpath)
def _do_copy_file(args):
    """Pool worker: copy a single ``(inpath, outpath)`` pair."""
    inpath, outpath = args
    etau.copy_file(inpath, outpath)
def _do_symlink_file(args):
    """Pool worker: symlink a single ``(inpath, outpath)`` pair."""
    inpath, outpath = args
    etau.symlink_file(inpath, outpath)
# Subclasses TypeError so generic type-error handlers also catch it.
class MediaTypeError(TypeError):
    """Exception raised when a problem with media types is encountered."""
    pass
| 24.197917 | 76 | 0.643134 | import multiprocessing
import eta.core.utils as etau
import eta.core.video as etav
import fiftyone.core.utils as fou
VIDEO = "video"
IMAGE = "image"
MEDIA_TYPES = {IMAGE, VIDEO}
def get_media_type(filepath):
    """Return the media type (VIDEO or IMAGE) for ``filepath``."""
    if etav.is_video_mime_type(filepath):
        return VIDEO
    # Anything that is not a video MIME type is treated as an image.
    return IMAGE
def export_media(inpaths, outpaths, mode="copy", num_workers=None):
    """Copy/move/symlink each inpath to its outpath using a process pool.

    Raises:
        ValueError: if ``mode`` is not "copy", "move" or "symlink".
    """
    num_files = len(inpaths)
    if num_files == 0:
        return
    if num_workers is None:
        num_workers = multiprocessing.cpu_count()
    inputs = list(zip(inpaths, outpaths))
    # Select the per-file worker matching the requested mode.
    if mode == "copy":
        op = _do_copy_file
    elif mode == "move":
        op = _do_move_file
    elif mode == "symlink":
        op = _do_symlink_file
    else:
        raise ValueError(
            "Unsupported mode '%s'. Supported values are %s"
            % (mode, ("copy", "move", "symlink"))
        )
    with fou.ProgressBar(total=num_files, iters_str="files") as pb:
        with multiprocessing.Pool(processes=num_workers) as pool:
            for _ in pb(pool.imap_unordered(op, inputs)):
                pass
def _do_move_file(args):
    """Pool worker: move one ``(inpath, outpath)`` pair."""
    inpath, outpath = args
    etau.move_file(inpath, outpath)
def _do_copy_file(args):
    """Pool worker: copy one ``(inpath, outpath)`` pair."""
    inpath, outpath = args
    etau.copy_file(inpath, outpath)
def _do_symlink_file(args):
    """Pool worker: symlink one ``(inpath, outpath)`` pair."""
    inpath, outpath = args
    etau.symlink_file(inpath, outpath)
class MediaTypeError(TypeError):
    """Raised when a problem with media types is encountered."""
    pass
| true | true |
1c3ce28081acfd617d52284024dc525fc91a71d8 | 1,510 | py | Python | 截图快捷翻译/main.py | liusongtao99/tools_python | f01d315c46a619828d02ed9327f4264ba3a382d8 | [
"Apache-2.0"
] | 130 | 2019-05-19T16:17:26.000Z | 2022-03-30T11:48:38.000Z | 截图快捷翻译/main.py | liusongtao99/tools_python | f01d315c46a619828d02ed9327f4264ba3a382d8 | [
"Apache-2.0"
] | null | null | null | 截图快捷翻译/main.py | liusongtao99/tools_python | f01d315c46a619828d02ed9327f4264ba3a382d8 | [
"Apache-2.0"
] | 119 | 2019-05-27T09:45:14.000Z | 2022-03-09T03:44:53.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: xag
@license: Apache Licence
@contact: xinganguo@gmail.com
@site: http://www.xingag.top
@software: PyCharm
@file: main.py
@time: 2020-03-18 09:29
@description:TODO
"""
import tkinter.messagebox
from tkinter import *
import pytesseract
from PIL import Image
from PIL import ImageGrab
from googletrans import Translator
def get_clip_image():
    """
    Grab an image from the clipboard and save it to a local file.
    :return: path of the saved image, or None if the clipboard has no image
    """
    image_result = None
    img = ImageGrab.grabclipboard()
    # grabclipboard() may also return a file list or None; only handle
    # actual in-memory images.
    if img and isinstance(img, Image.Image):
        print(img.size)
        print(img.mode)
        image_result = './temp.png'
        img.save(image_result)
    return image_result
def image_ocr(image_path):
    """
    Recognize the English text contained in the image.
    :return: the recognized text
    """
    # English: lang='eng'
    # Chinese: lang='chi_sim'
    return pytesseract.image_to_string(Image.open(image_path), lang='eng')
def trans_eng(content_eng):
    """
    Translate English text to Simplified Chinese via translate.google.cn.
    :param content_eng: English source text
    :return: the translated Chinese text
    """
    translator = Translator(service_urls=['translate.google.cn'])
    return translator.translate(content_eng, src='en', dest='zh-cn').text
# Script flow: clipboard screenshot -> OCR -> translate -> message box.
image_path = get_clip_image()
if image_path:
    # OCR the screenshot and flatten line breaks into spaces.
    content_eng = image_ocr(image_path).replace("\r", " ").replace("\n", " ")
    # Translate the recognized text.
    if content_eng:
        content_chinese = trans_eng(content_eng)
        print(content_chinese)
        # Hide the root Tk window so only the message box is shown.
        root = Tk()
        root.withdraw()
        tkinter.messagebox.showinfo('翻译结果', content_chinese)
import tkinter.messagebox
from tkinter import *
import pytesseract
from PIL import Image
from PIL import ImageGrab
from googletrans import Translator
def get_clip_image():
    """Grab an image from the clipboard and save it to ./temp.png.

    :return: the saved file path, or None when the clipboard holds no image
    """
    image_result = None
    img = ImageGrab.grabclipboard()
    if img and isinstance(img, Image.Image):
        print(img.size)
        print(img.mode)
        image_result = './temp.png'
        img.save(image_result)
    return image_result
def image_ocr(image_path):
    """Run Tesseract OCR (English) on the image and return the text."""
    return pytesseract.image_to_string(Image.open(image_path), lang='eng')
def trans_eng(content_eng):
    """Translate English text to Simplified Chinese via Google Translate."""
    translator = Translator(service_urls=['translate.google.cn'])
    return translator.translate(content_eng, src='en', dest='zh-cn').text
# Script entry: OCR the clipboard screenshot, translate it and pop up the result.
image_path = get_clip_image()
if image_path:
    # Flatten newlines so OCR line breaks don't split sentences.
    content_eng = image_ocr(image_path).replace("\r", " ").replace("\n", " ")
    if content_eng:
        content_chinese = trans_eng(content_eng)
        print(content_chinese)
        # Hide the main Tk window so only the message box is shown.
        root = Tk()
        root.withdraw()
        tkinter.messagebox.showinfo('翻译结果', content_chinese)
1c3ce297aba483c799d217f1c5ef168d40b272da | 1,738 | py | Python | APP/models.py | jhodges0845/Flask_Showcase | f91aa80034de194a6697e047ec2a1075a37da61d | [
"MIT"
] | null | null | null | APP/models.py | jhodges0845/Flask_Showcase | f91aa80034de194a6697e047ec2a1075a37da61d | [
"MIT"
] | 3 | 2021-06-08T21:20:46.000Z | 2022-03-12T00:24:30.000Z | APP/models.py | jhodges0845/Flask_Showcase | f91aa80034de194a6697e047ec2a1075a37da61d | [
"MIT"
] | null | null | null | from APP import db, login_manager
from flask import current_app
from flask_login import UserMixin
from datetime import datetime
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a User by its (stringified) primary key."""
    return User.query.get(int(user_id))
class User(db.Model, UserMixin):
    """Application user account.

    Implements the Flask-Login ``UserMixin`` contract and issues/verifies
    timed password-reset tokens via itsdangerous.
    """
    __tablename__ = 'user'

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # Profile picture filename, served from the static folder.
    image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
    # Hashed password (bcrypt hashes are 60 chars).
    password = db.Column(db.String(60), nullable=False)
    logs = db.relationship('Log', backref='author', lazy=True)

    def get_reset_token(self, expires_sec=1800):
        """Return a signed reset token embedding this user's id.

        :param expires_sec: token lifetime in seconds (default 30 minutes)
        """
        s = Serializer(current_app.config['SECRET_KEY'], expires_sec)
        return s.dumps({'user_id': self.id}).decode('utf-8')

    @staticmethod
    def verify_reset_token(token):
        """Return the User encoded in ``token``, or None if invalid/expired."""
        s = Serializer(current_app.config["SECRET_KEY"])
        try:
            user_id = s.loads(token)['user_id']
        # BUG FIX: was a bare ``except:``, which also swallows SystemExit and
        # KeyboardInterrupt. Catch only real verification failures.
        except Exception:
            return None
        return User.query.get(user_id)

    def __repr__(self):
        return f"User('{self.username}','{self.email}', '{self.image_file}')"
class Log(db.Model):
    """A single user-authored log entry (comment + timestamp + location)."""
    __tablename__= 'log'
    id = db.Column(db.Integer, primary_key = True)
    # Free-form comment text.
    comment = db.Column(db.String(500), nullable=False)
    # When the event happened; defaults to the insert time (UTC).
    event_datetime = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    location = db.Column(db.String(50), nullable=False)
    # Owning user; exposed on instances as ``author`` via User.logs backref.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    def __repr__(self):
        return f"Log('{self.location}', '{self.event_datetime}')"
| 36.978723 | 84 | 0.692175 | from APP import db, login_manager
from flask import current_app
from flask_login import UserMixin
from datetime import datetime
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a User by its (stringified) primary key."""
    return User.query.get(int(user_id))
class User(db.Model, UserMixin):
    """Application user account.

    Implements the Flask-Login ``UserMixin`` contract and issues/verifies
    timed password-reset tokens via itsdangerous.
    """
    __tablename__ = 'user'

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(20), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # Profile picture filename, served from the static folder.
    image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
    # Hashed password (bcrypt hashes are 60 chars).
    password = db.Column(db.String(60), nullable=False)
    logs = db.relationship('Log', backref='author', lazy=True)

    def get_reset_token(self, expires_sec=1800):
        """Return a signed reset token embedding this user's id.

        :param expires_sec: token lifetime in seconds (default 30 minutes)
        """
        s = Serializer(current_app.config['SECRET_KEY'], expires_sec)
        return s.dumps({'user_id': self.id}).decode('utf-8')

    @staticmethod
    def verify_reset_token(token):
        """Return the User encoded in ``token``, or None if invalid/expired."""
        s = Serializer(current_app.config["SECRET_KEY"])
        try:
            user_id = s.loads(token)['user_id']
        # BUG FIX: was a bare ``except:``, which also swallows SystemExit and
        # KeyboardInterrupt. Catch only real verification failures.
        except Exception:
            return None
        return User.query.get(user_id)

    def __repr__(self):
        return f"User('{self.username}','{self.email}', '{self.image_file}')"
class Log(db.Model):
    """A single user-authored log entry (comment + timestamp + location)."""
    __tablename__= 'log'
    id = db.Column(db.Integer, primary_key = True)
    comment = db.Column(db.String(500), nullable=False)
    # When the event happened; defaults to the insert time (UTC).
    event_datetime = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    location = db.Column(db.String(50), nullable=False)
    # Owning user; exposed on instances as ``author`` via User.logs backref.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    def __repr__(self):
        return f"Log('{self.location}', '{self.event_datetime}')"
| true | true |
1c3ce2da3c973967580037a197f58386b762d0f3 | 31,484 | py | Python | pythonFiles/preview/jedi/evaluate/iterable.py | Andrewnetwork/pythonVSCode | 415e20cbd5947dc48f5dd57787e6c96985989d30 | [
"MIT"
] | null | null | null | pythonFiles/preview/jedi/evaluate/iterable.py | Andrewnetwork/pythonVSCode | 415e20cbd5947dc48f5dd57787e6c96985989d30 | [
"MIT"
] | null | null | null | pythonFiles/preview/jedi/evaluate/iterable.py | Andrewnetwork/pythonVSCode | 415e20cbd5947dc48f5dd57787e6c96985989d30 | [
"MIT"
] | null | null | null | """
Contains all classes and functions to deal with lists, dicts, generators and
iterators in general.
Array modifications
*******************
If the content of an array (``set``/``list``) is requested somewhere, the
current module will be checked for appearances of ``arr.append``,
``arr.insert``, etc. If the ``arr`` name points to an actual array, the
content will be added.
This can be really cpu intensive, as you can imagine. Because |jedi| has to
follow **every** ``append`` and check whether it's the right array. However this
works pretty well, because in *slow* cases, the recursion detector and other
settings will stop this process.
It is important to note that:
1. Array modfications work only in the current module.
2. Jedi only checks Array additions; ``list.pop``, etc are ignored.
"""
from jedi import debug
from jedi import settings
from jedi import common
from jedi.common import unite, safe_property
from jedi._compatibility import unicode, zip_longest, is_py3
from jedi.evaluate import compiled
from jedi.evaluate import helpers
from jedi.evaluate import analysis
from jedi.evaluate import pep0484
from jedi.evaluate import context
from jedi.evaluate import precedence
from jedi.evaluate import recursion
from jedi.evaluate.cache import memoize_default
from jedi.evaluate.filters import DictFilter, AbstractNameDefinition, \
ParserTreeFilter
class AbstractSequence(context.Context):
    """Base class for all sequence-like contexts (list/set/tuple/dict)."""
    builtin_methods = {}
    api_type = 'instance'
    def __init__(self, evaluator):
        # Sequences always live in the builtins module.
        super(AbstractSequence, self).__init__(evaluator, evaluator.BUILTINS)
    def get_filters(self, search_global, until_position=None, origin_scope=None):
        # Subclasses must provide name lookup for their own methods.
        raise NotImplementedError
    @property
    def name(self):
        # The context's name is simply the builtin type name, e.g. 'list'.
        return compiled.CompiledContextName(self, self.array_type)
class BuiltinMethod(object):
    """Imitation of builtin methods such as ``Generator.__next__`` or
    ``dict.values``: calling it runs a local Python implementation, while
    every other attribute access is proxied to the real builtin function."""
    def __init__(self, builtin_context, method, builtin_func):
        self._proxied_func = builtin_func
        self._bound_context = builtin_context
        self._implementation = method
    def py__call__(self, params):
        # ``params`` is deliberately ignored; the implementation only needs
        # the context it is bound to.
        return self._implementation(self._bound_context)
    def __getattr__(self, name):
        return getattr(self._proxied_func, name)
class SpecialMethodFilter(DictFilter):
    """
    A filter for methods that are defined in this module on the corresponding
    classes like Generator (for __next__, etc).
    """
    class SpecialMethodName(AbstractNameDefinition):
        api_type = 'function'
        def __init__(self, parent_context, string_name, callable_, builtin_context):
            self.parent_context = parent_context
            self.string_name = string_name
            # The Python function that implements the builtin method.
            self._callable = callable_
            self._builtin_context = builtin_context
        def infer(self):
            # Look the name up on the real builtin object first.
            filter = next(self._builtin_context.get_filters())
            # We can take the first index, because on builtin methods there's
            # always only going to be one name. The same is true for the
            # inferred values.
            builtin_func = next(iter(filter.get(self.string_name)[0].infer()))
            return set([BuiltinMethod(self.parent_context, self._callable, builtin_func)])
    def __init__(self, context, dct, builtin_context):
        super(SpecialMethodFilter, self).__init__(dct)
        self.context = context
        self._builtin_context = builtin_context
        """
        This context is what will be used to introspect the name, where as the
        other context will be used to execute the function.
        We distinguish, because we have to.
        """
    def _convert(self, name, value):
        # Map each dict entry to a SpecialMethodName definition.
        return self.SpecialMethodName(self.context, name, value, self._builtin_context)
def has_builtin_methods(cls):
    """Class decorator that collects ``registered_builtin_methods`` dicts.

    Merges the ``builtin_methods`` of all base classes first (so inherited
    builtin methods are not lost when a subclass doesn't mention them) and
    then the methods registered on the class's own functions, storing the
    result as ``cls.builtin_methods``.
    """
    collected = {}
    for base in reversed(cls.__bases__):
        collected.update(getattr(base, 'builtin_methods', {}))
    for member in cls.__dict__.values():
        collected.update(getattr(member, 'registered_builtin_methods', {}))
    cls.builtin_methods = collected
    return cls
def register_builtin_method(method_name, python_version_match=None):
    """Decorator factory that marks a function as implementing a builtin
    method.

    The mapping is stored on the function object itself (in its
    ``registered_builtin_methods`` dict) and later collected by the
    ``has_builtin_methods`` class decorator. When ``python_version_match`` is
    given, the registration only happens on the matching major version.
    """
    def decorator(func):
        # Some functions do only apply to certain versions.
        version_mismatch = (python_version_match and
                            python_version_match != 2 + int(is_py3))
        if not version_mismatch:
            registry = func.__dict__.setdefault('registered_builtin_methods', {})
            registry[method_name] = func
        return func
    return decorator
@has_builtin_methods
class GeneratorMixin(object):
    """Shared behavior for generator-like contexts (generator objects and
    generator comprehensions)."""
    array_type = None
    @register_builtin_method('send')
    @register_builtin_method('next', python_version_match=2)
    @register_builtin_method('__next__', python_version_match=3)
    def py__next__(self):
        # TODO add TypeError if params are given.
        return unite(lazy_context.infer() for lazy_context in self.py__iter__())
    def get_filters(self, search_global, until_position=None, origin_scope=None):
        gen_obj = compiled.get_special_object(self.evaluator, 'GENERATOR_OBJECT')
        # Our fake methods (send/next/__next__) shadow the compiled ones.
        yield SpecialMethodFilter(self, self.builtin_methods, gen_obj)
        for filter in gen_obj.get_filters(search_global):
            yield filter
    def py__bool__(self):
        # A generator object is always truthy.
        return True
    def py__class__(self):
        gen_obj = compiled.get_special_object(self.evaluator, 'GENERATOR_OBJECT')
        return gen_obj.py__class__()
    @property
    def name(self):
        return compiled.CompiledContextName(self, 'generator')
class Generator(GeneratorMixin, context.Context):
    """Handling of `yield` functions."""
    def __init__(self, evaluator, func_execution_context):
        super(Generator, self).__init__(evaluator, parent_context=evaluator.BUILTINS)
        # The execution of the generator function that produced this object.
        self._func_execution_context = func_execution_context
    def py__iter__(self):
        # Iterating the generator yields whatever the function yields.
        return self._func_execution_context.get_yield_values()
    def __repr__(self):
        return "<%s of %s>" % (type(self).__name__, self._func_execution_context)
class CompForContext(context.TreeContext):
    """Context for one ``for ... in ...`` clause of a comprehension."""
    @classmethod
    def from_comp_for(cls, parent_context, comp_for):
        # Alternate constructor taking the evaluator from the parent context.
        return cls(parent_context.evaluator, parent_context, comp_for)
    def __init__(self, evaluator, parent_context, comp_for):
        super(CompForContext, self).__init__(evaluator, parent_context)
        self.tree_node = comp_for
    def get_node(self):
        return self.tree_node
    def get_filters(self, search_global, until_position=None, origin_scope=None):
        yield ParserTreeFilter(self.evaluator, self)
class Comprehension(AbstractSequence):
    """Base class for list/set/dict/generator comprehensions."""
    @staticmethod
    def from_atom(evaluator, context, atom):
        # Pick the concrete comprehension class from the bracket type; for
        # '{' the presence of a ':' distinguishes dict from set.
        bracket = atom.children[0]
        if bracket == '{':
            if atom.children[1].children[1] == ':':
                cls = DictComprehension
            else:
                cls = SetComprehension
        elif bracket == '(':
            cls = GeneratorComprehension
        elif bracket == '[':
            cls = ListComprehension
        return cls(evaluator, context, atom)
    def __init__(self, evaluator, defining_context, atom):
        super(Comprehension, self).__init__(evaluator)
        # The context in which the comprehension was written.
        self._defining_context = defining_context
        self._atom = atom
    def _get_comprehension(self):
        # The atom contains a testlist_comp
        return self._atom.children[1]
    def _get_comp_for(self):
        # The atom contains a testlist_comp
        return self._get_comprehension().children[1]
    def _eval_node(self, index=0):
        """
        The first part `x + 1` of the list comprehension:
        [x + 1 for x in foo]
        """
        return self._get_comprehension().children[index]
    @memoize_default()
    def _get_comp_for_context(self, parent_context, comp_for):
        # TODO shouldn't this be part of create_context?
        return CompForContext.from_comp_for(parent_context, comp_for)
    def _nested(self, comp_fors, parent_context=None):
        # Recursively evaluate nested ``for`` clauses; the IndexError from
        # ``comp_fors[0]`` on the empty tail signals the innermost loop, at
        # which point the head expression(s) are evaluated.
        evaluator = self.evaluator
        comp_for = comp_fors[0]
        input_node = comp_for.children[3]
        parent_context = parent_context or self._defining_context
        input_types = parent_context.eval_node(input_node)
        iterated = py__iter__(evaluator, input_types, input_node)
        exprlist = comp_for.children[1]
        for i, lazy_context in enumerate(iterated):
            types = lazy_context.infer()
            # Bind the loop target name(s) to the iterated types.
            dct = unpack_tuple_to_dict(evaluator, types, exprlist)
            context = self._get_comp_for_context(
                parent_context,
                comp_for,
            )
            with helpers.predefine_names(context, comp_for, dct):
                try:
                    for result in self._nested(comp_fors[1:], context):
                        yield result
                except IndexError:
                    iterated = context.eval_node(self._eval_node())
                    if self.array_type == 'dict':
                        # Dicts yield (key types, value types) pairs.
                        yield iterated, context.eval_node(self._eval_node(2))
                    else:
                        yield iterated
    @memoize_default(default=[])
    @common.to_list
    def _iterate(self):
        comp_fors = tuple(self._get_comp_for().get_comp_fors())
        for result in self._nested(comp_fors):
            yield result
    def py__iter__(self):
        for set_ in self._iterate():
            yield context.LazyKnownContexts(set_)
    def __repr__(self):
        return "<%s of %s>" % (type(self).__name__, self._atom)
class ArrayMixin(object):
    """Shared filter/class behavior for array-like contexts."""
    def get_filters(self, search_global, until_position=None, origin_scope=None):
        # `array.type` is a string with the type, e.g. 'list'.
        compiled_obj = compiled.builtin_from_name(self.evaluator, self.array_type)
        # Fake methods (values/items/...) first, then the real builtin's names.
        yield SpecialMethodFilter(self, self.builtin_methods, compiled_obj)
        for typ in compiled_obj.execute_evaluated(self):
            for filter in typ.get_filters():
                yield filter
    def py__bool__(self):
        return None  # We don't know the length, because of appends.
    def py__class__(self):
        return compiled.builtin_from_name(self.evaluator, self.array_type)
    @safe_property
    def parent(self):
        return self.evaluator.BUILTINS
    def dict_values(self):
        # Union of all value types of a dict literal (keys ignored).
        return unite(self._defining_context.eval_node(v) for k, v in self._items())
class ListComprehension(ArrayMixin, Comprehension):
    """A list comprehension context, e.g. ``[x for x in y]``."""
    array_type = 'list'
    def py__getitem__(self, index):
        # A slice can't be narrowed to single elements; return the whole
        # comprehension unchanged.
        if isinstance(index, slice):
            return set([self])
        all_types = list(self.py__iter__())
        # May raise IndexError; callers fall back to iterating all values.
        return all_types[index].infer()
class SetComprehension(ArrayMixin, Comprehension):
    """A set comprehension context, e.g. ``{x for x in y}``."""
    array_type = 'set'
@has_builtin_methods
class DictComprehension(ArrayMixin, Comprehension):
    """A dict comprehension context, e.g. ``{k: v for k, v in y}``."""
    array_type = 'dict'

    def _get_comp_for(self):
        # For dicts the comp_for sits after ``key : value``, i.e. child 3.
        return self._get_comprehension().children[3]

    def py__iter__(self):
        # Iterating a dict yields its keys.
        for keys, values in self._iterate():
            yield context.LazyKnownContexts(keys)

    def py__getitem__(self, index):
        """Return the value types for a literal key; all values otherwise."""
        for keys, values in self._iterate():
            for k in keys:
                if isinstance(k, compiled.CompiledObject):
                    if k.obj == index:
                        return values
        return self.dict_values()

    def dict_values(self):
        return unite(values for keys, values in self._iterate())

    @register_builtin_method('values')
    def _imitate_values(self):
        # Fake result of dict.values(): a list of all value types.
        lazy_context = context.LazyKnownContexts(self.dict_values())
        return set([FakeSequence(self.evaluator, 'list', [lazy_context])])

    @register_builtin_method('items')
    def _imitate_items(self):
        # Fake result of dict.items(): a list of (key, value) tuples.
        # BUG FIX: the previous code read ``FakeSequence(self.evaluator,
        # 'tuple' (...))`` -- a missing comma made it *call* the string
        # 'tuple', raising TypeError. It also referenced an undefined
        # ``create_evaluated_sequence_set`` helper. Build the sequences the
        # same way as DictLiteralContext._imitate_items instead.
        lazy_contexts = [
            context.LazyKnownContext(FakeSequence(
                self.evaluator, 'tuple',
                (context.LazyKnownContexts(keys), context.LazyKnownContexts(values))
            )) for keys, values in self._iterate()
        ]
        return set([FakeSequence(self.evaluator, 'list', lazy_contexts)])
class GeneratorComprehension(GeneratorMixin, Comprehension):
    """A generator expression context, e.g. ``(x for x in y)``."""
    pass
class SequenceLiteralContext(ArrayMixin, AbstractSequence):
    """A sequence written literally in source, e.g. ``[1, 2]`` or ``(a, b)``."""
    # Opening bracket -> builtin type name.
    mapping = {'(': 'tuple',
               '[': 'list',
               '{': 'set'}
    def __init__(self, evaluator, defining_context, atom):
        super(SequenceLiteralContext, self).__init__(evaluator)
        self.atom = atom
        self._defining_context = defining_context
        if self.atom.type in ('testlist_star_expr', 'testlist'):
            # A bare ``a, b`` without brackets is a tuple.
            self.array_type = 'tuple'
        else:
            self.array_type = SequenceLiteralContext.mapping[atom.children[0]]
            """The builtin name of the array (list, set, tuple or dict)."""
    def py__getitem__(self, index):
        """Here the index is an int/str. Raises IndexError/KeyError."""
        if self.array_type == 'dict':
            # Search for a literal key whose compiled value equals the index.
            for key, value in self._items():
                for k in self._defining_context.eval_node(key):
                    if isinstance(k, compiled.CompiledObject) \
                            and index == k.obj:
                        return self._defining_context.eval_node(value)
            raise KeyError('No key found in dictionary %s.' % self)
        # Can raise an IndexError
        if isinstance(index, slice):
            return set([self])
        else:
            return self._defining_context.eval_node(self._items()[index])
    def py__iter__(self):
        """
        While values returns the possible values for any array field, this
        function returns the value for a certain index.
        """
        if self.array_type == 'dict':
            # Get keys.
            types = set()
            for k, _ in self._items():
                types |= self._defining_context.eval_node(k)
            # We don't know which dict index comes first, therefore always
            # yield all the types.
            for _ in types:
                yield context.LazyKnownContexts(types)
        else:
            for node in self._items():
                yield context.LazyTreeContext(self._defining_context, node)
            # Include contexts appended/inserted dynamically elsewhere.
            for addition in check_array_additions(self._defining_context, self):
                yield addition
    def _values(self):
        """Returns a list of a list of node."""
        if self.array_type == 'dict':
            return unite(v for k, v in self._items())
        else:
            return self._items()
    def _items(self):
        # Return the item nodes of the literal; for dicts a list of
        # (key_node, value_node) tuples.
        c = self.atom.children
        if self.atom.type in ('testlist_star_expr', 'testlist'):
            return c[::2]
        array_node = c[1]
        if array_node in (']', '}', ')'):
            return []  # Direct closing bracket, doesn't contain items.
        if array_node.type == 'testlist_comp':
            return array_node.children[::2]
        elif array_node.type == 'dictorsetmaker':
            kv = []
            iterator = iter(array_node.children)
            for key in iterator:
                op = next(iterator, None)
                if op is None or op == ',':
                    kv.append(key)  # A set.
                else:
                    assert op == ':'  # A dict.
                    kv.append((key, next(iterator)))
                    next(iterator, None)  # Possible comma.
            return kv
        else:
            # Single element, e.g. ``[x]``.
            return [array_node]
    def exact_key_items(self):
        """
        Returns a generator of tuples like dict.items(), where the key is
        resolved (as a string) and the values are still lazy contexts.
        """
        for key_node, value in self._items():
            for key in self._defining_context.eval_node(key_node):
                if precedence.is_string(key):
                    yield key.obj, context.LazyTreeContext(self._defining_context, value)
    def __repr__(self):
        return "<%s of %s>" % (self.__class__.__name__, self.atom)
@has_builtin_methods
class DictLiteralContext(SequenceLiteralContext):
    """A ``{key: value, ...}`` dict literal."""
    array_type = 'dict'
    def __init__(self, evaluator, defining_context, atom):
        # Intentionally skips SequenceLiteralContext.__init__ (its bracket
        # detection) by calling that class's superclass directly.
        super(SequenceLiteralContext, self).__init__(evaluator)
        self._defining_context = defining_context
        self.atom = atom
    @register_builtin_method('values')
    def _imitate_values(self):
        # Fake result of dict.values(): a list of all value types.
        lazy_context = context.LazyKnownContexts(self.dict_values())
        return set([FakeSequence(self.evaluator, 'list', [lazy_context])])
    @register_builtin_method('items')
    def _imitate_items(self):
        # Fake result of dict.items(): a list of (key, value) tuples.
        lazy_contexts = [
            context.LazyKnownContext(FakeSequence(
                self.evaluator, 'tuple',
                (context.LazyTreeContext(self._defining_context, key_node),
                 context.LazyTreeContext(self._defining_context, value_node))
            )) for key_node, value_node in self._items()
        ]
        return set([FakeSequence(self.evaluator, 'list', lazy_contexts)])
class _FakeArray(SequenceLiteralContext):
    """Base for array contexts that are not backed by a parser atom."""
    def __init__(self, evaluator, container, type):
        super(SequenceLiteralContext, self).__init__(evaluator)
        self.array_type = type
        self.atom = container
# TODO is this class really needed?
class ImplicitTuple(_FakeArray):
    """Tuple written without parentheses, e.g. ``a, b = ...``.

    Effectively unused: the constructor raises immediately.
    """
    def __init__(self, evaluator, testlist):
        super(ImplicitTuple, self).__init__(evaluator, testlist, 'tuple')
        raise NotImplementedError
        # NOTE(review): unreachable -- the raise above means _testlist is
        # never assigned and _items can never run.
        self._testlist = testlist
    def _items(self):
        return self._testlist.children[::2]
class FakeSequence(_FakeArray):
    """A sequence built from already-known lazy contexts instead of nodes."""
    def __init__(self, evaluator, array_type, lazy_context_list):
        """
        type should be one of "tuple", "list"
        """
        super(FakeSequence, self).__init__(evaluator, None, array_type)
        self._lazy_context_list = lazy_context_list
    def _items(self):
        # Node-based item access doesn't apply here.
        raise DeprecationWarning
        # NOTE(review): unreachable legacy code from the old API.
        return self._context_list
    def py__getitem__(self, index):
        return set(self._lazy_context_list[index].infer())
    def py__iter__(self):
        return self._lazy_context_list
    def __repr__(self):
        return "<%s of %s>" % (type(self).__name__, self._lazy_context_list)
class FakeDict(_FakeArray):
    """A dict whose keys are plain Python objects and whose values are lazy
    contexts (used e.g. for keyword arguments)."""
    def __init__(self, evaluator, dct):
        super(FakeDict, self).__init__(evaluator, dct, 'dict')
        self._dct = dct
    def py__iter__(self):
        # Iterating a dict yields its keys, wrapped as compiled objects.
        for key in self._dct:
            yield context.LazyKnownContext(compiled.create(self.evaluator, key))
    def py__getitem__(self, index):
        return self._dct[index].infer()
    def dict_values(self):
        return unite(lazy_context.infer() for lazy_context in self._dct.values())
    def _items(self):
        # Node-based item access doesn't apply here.
        raise DeprecationWarning
        # NOTE(review): unreachable legacy code from the old API.
        for key, values in self._dct.items():
            # TODO this is not proper. The values could be multiple values?!
            yield key, values[0]
    def exact_key_items(self):
        return self._dct.items()
class MergedArray(_FakeArray):
    """Several arrays treated as one (its type is the last array's type)."""
    def __init__(self, evaluator, arrays):
        super(MergedArray, self).__init__(evaluator, arrays, arrays[-1].array_type)
        self._arrays = arrays
    def py__iter__(self):
        # Chain the iteration of all merged arrays.
        for array in self._arrays:
            for lazy_context in array.py__iter__():
                yield lazy_context
    def py__getitem__(self, index):
        # Indexing is imprecise here: return the union of all contained types.
        return unite(lazy_context.infer() for lazy_context in self.py__iter__())
    def _items(self):
        for array in self._arrays:
            for a in array._items():
                yield a
    def __len__(self):
        return sum(len(a) for a in self._arrays)
def unpack_tuple_to_dict(evaluator, types, exprlist):
    """
    Unpacking tuple assignments in for statements and expr_stmts.

    Returns a dict mapping each target name to the set of types assigned
    to it, recursing into nested/parenthesized targets. Mismatched lengths
    produce value-error analysis entries.
    """
    if exprlist.type == 'name':
        return {exprlist.value: types}
    elif exprlist.type == 'atom' and exprlist.children[0] in '([':
        return unpack_tuple_to_dict(evaluator, types, exprlist.children[1])
    elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist',
                           'testlist_star_expr'):
        dct = {}
        parts = iter(exprlist.children[::2])
        n = 0
        for lazy_context in py__iter__(evaluator, types, exprlist):
            n += 1
            try:
                part = next(parts)
            except StopIteration:
                # TODO this context is probably not right.
                # NOTE(review): ``part`` here is the *previous* target; if the
                # very first next() raised it would be unbound -- verify.
                analysis.add(next(iter(types)), 'value-error-too-many-values', part,
                             message="ValueError: too many values to unpack (expected %s)" % n)
            else:
                dct.update(unpack_tuple_to_dict(evaluator, lazy_context.infer(), part))
        has_parts = next(parts, None)
        if types and has_parts is not None:
            # TODO this context is probably not right.
            analysis.add(next(iter(types)), 'value-error-too-few-values', has_parts,
                         message="ValueError: need more than %s values to unpack" % n)
        return dct
    elif exprlist.type == 'power' or exprlist.type == 'atom_expr':
        # Something like ``arr[x], var = ...``.
        # This is something that is not yet supported, would also be difficult
        # to write into a dict.
        return {}
    elif exprlist.type == 'star_expr':  # `a, *b, c = x` type unpackings
        # Currently we're not supporting them.
        return {}
    raise NotImplementedError
def py__iter__(evaluator, types, node=None):
    """Iterate over all given contexts in lockstep.

    Yields one merged lazy context per position; contexts that are not
    iterable produce a type-error analysis entry (when ``node`` is given)
    and are skipped.
    """
    debug.dbg('py__iter__')
    type_iters = []
    for typ in types:
        try:
            iter_method = typ.py__iter__
        except AttributeError:
            if node is not None:
                # TODO this context is probably not right.
                analysis.add(typ, 'type-error-not-iterable', node,
                             message="TypeError: '%s' object is not iterable" % typ)
        else:
            type_iters.append(iter_method())
    # Merge per-position; shorter iterators contribute nothing (None skipped).
    for lazy_contexts in zip_longest(*type_iters):
        yield context.get_merged_lazy_context(
            [l for l in lazy_contexts if l is not None]
        )
def py__iter__types(evaluator, types, node=None):
    """
    Calls `py__iter__`, but ignores the ordering in the end and just returns
    all types that it contains.
    """
    inferred = set()
    for lazy_context in py__iter__(evaluator, types, node):
        inferred |= lazy_context.infer()
    return inferred
def py__getitem__(evaluator, context, types, trailer):
    """Evaluate a subscript trailer (``x[...]``) against all given types."""
    from jedi.evaluate.representation import ClassContext
    from jedi.evaluate.instance import TreeInstance
    result = set()
    trailer_op, node, trailer_cl = trailer.children
    assert trailer_op == "["
    assert trailer_cl == "]"
    # special case: PEP0484 typing module, see
    # https://github.com/davidhalter/jedi/issues/663
    for typ in list(types):
        if isinstance(typ, (ClassContext, TreeInstance)):
            typing_module_types = pep0484.py__getitem__(context, typ, node)
            if typing_module_types is not None:
                types.remove(typ)
                result |= typing_module_types
    if not types:
        # all consumed by special cases
        return result
    for index in create_index_types(evaluator, context, node):
        # Unwrap compiled objects / Slice wrappers to plain Python values.
        if isinstance(index, (compiled.CompiledObject, Slice)):
            index = index.obj
        if type(index) not in (float, int, str, unicode, slice):
            # If the index is not clearly defined, we have to get all the
            # possiblities.
            for typ in list(types):
                if isinstance(typ, AbstractSequence) and typ.array_type == 'dict':
                    types.remove(typ)
                    result |= typ.dict_values()
            return result | py__iter__types(evaluator, types)
        for typ in types:
            # The actual getitem call.
            try:
                getitem = typ.py__getitem__
            except AttributeError:
                # TODO this context is probably not right.
                analysis.add(context, 'type-error-not-subscriptable', trailer_op,
                             message="TypeError: '%s' object is not subscriptable" % typ)
            else:
                try:
                    result |= getitem(index)
                except IndexError:
                    # Out of range: fall back to all iterated types.
                    result |= py__iter__types(evaluator, set([typ]))
                except KeyError:
                    # Must be a dict. Lists don't raise KeyErrors.
                    result |= typ.dict_values()
    return result
def check_array_additions(context, sequence):
    """Mapper for _check_array_additions: returns contexts added to
    ``sequence`` via append/insert/extend (lists) or add/update (sets)."""
    # TODO also check for dict updates
    if sequence.array_type in ('list', 'set'):
        return _check_array_additions(context, sequence)
    return set()
@memoize_default(default=set())
@debug.increase_indent
def _check_array_additions(context, sequence):
    """
    Checks if a `Array` has "add" (append, insert, extend) statements:
    >>> a = [""]
    >>> a.append(1)
    """
    from jedi.evaluate import param
    debug.dbg('Dynamic array search for %s' % sequence, color='MAGENTA')
    module_context = context.get_root_context()
    if not settings.dynamic_array_additions or isinstance(module_context, compiled.CompiledObject):
        debug.dbg('Dynamic array search aborted.', color='MAGENTA')
        return set()
    def find_additions(context, arglist, add_name):
        # Collect the contexts passed to one append/extend/... call.
        params = list(param.TreeArguments(context.evaluator, context, arglist).unpack())
        result = set()
        if add_name in ['insert']:
            # insert(index, value): skip the index argument.
            params = params[1:]
        if add_name in ['append', 'add', 'insert']:
            for key, lazy_context in params:
                result.add(lazy_context)
        elif add_name in ['extend', 'update']:
            # extend/update add the *contents* of their argument.
            for key, lazy_context in params:
                result |= set(py__iter__(context.evaluator, lazy_context.infer()))
        return result
    # Temporarily disable cross-module dynamic params to limit recursion.
    temp_param_add, settings.dynamic_params_for_other_modules = \
        settings.dynamic_params_for_other_modules, False
    is_list = sequence.name.string_name == 'list'
    search_names = (['append', 'extend', 'insert'] if is_list else ['add', 'update'])
    added_types = set()
    for add_name in search_names:
        try:
            possible_names = module_context.tree_node.used_names[add_name]
        except KeyError:
            continue
        else:
            for name in possible_names:
                context_node = context.tree_node
                # Only consider usages within the current context's span.
                if not (context_node.start_pos < name.start_pos < context_node.end_pos):
                    continue
                trailer = name.parent
                power = trailer.parent
                trailer_pos = power.children.index(trailer)
                try:
                    execution_trailer = power.children[trailer_pos + 1]
                except IndexError:
                    continue
                else:
                    # Must be an actual call with at least one argument.
                    if execution_trailer.type != 'trailer' \
                            or execution_trailer.children[0] != '(' \
                            or execution_trailer.children[1] == ')':
                        continue
                random_context = context.create_context(name)
                with recursion.execution_allowed(context.evaluator, power) as allowed:
                    if allowed:
                        found = helpers.evaluate_call_of_leaf(
                            random_context,
                            name,
                            cut_own_trailer=True
                        )
                        if sequence in found:
                            # The arrays match. Now add the results
                            added_types |= find_additions(
                                random_context,
                                execution_trailer.children[1],
                                add_name
                            )
    # reset settings
    settings.dynamic_params_for_other_modules = temp_param_add
    debug.dbg('Dynamic array result %s' % added_types, color='MAGENTA')
    return added_types
def get_dynamic_array_instance(instance):
    """Used for set() and list() instances."""
    if not settings.dynamic_array_additions:
        return instance.var_args
    # Wrap the instance so additions (append/add/...) are also picked up.
    ai = _ArrayInstance(instance)
    from jedi.evaluate import param
    return param.ValuesArguments([[ai]])
class _ArrayInstance(object):
    """
    Used for the usage of set() and list().
    This is definitely a hack, but a good one :-)
    It makes it possible to use set/list conversions.
    In contrast to Array, ListComprehension and all other iterable types, this
    is something that is only used inside `evaluate/compiled/fake/builtins.py`
    and therefore doesn't need filters, `py__bool__` and so on, because
    we don't use these operations in `builtins.py`.
    """
    def __init__(self, instance):
        self.instance = instance
        self.var_args = instance.var_args
    def py__iter__(self):
        # First yield the contents of the first constructor argument...
        var_args = self.var_args
        try:
            _, lazy_context = next(var_args.unpack())
        except StopIteration:
            pass
        else:
            for lazy in py__iter__(self.instance.evaluator, lazy_context.infer()):
                yield lazy
        # ...then anything added dynamically via append/add/etc.
        from jedi.evaluate import param
        if isinstance(var_args, param.TreeArguments):
            additions = _check_array_additions(var_args.context, self.instance)
            for addition in additions:
                yield addition
class Slice(context.Context):
    """A slice expression (``a[x:y:z]``) evaluated lazily.

    Note: the ``context`` parameter shadows the module-level ``context``
    import within this class's methods.
    """
    def __init__(self, context, start, stop, step):
        super(Slice, self).__init__(
            context.evaluator,
            parent_context=context.evaluator.BUILTINS
        )
        self._context = context
        # all of them are either a Precedence or None.
        self._start = start
        self._stop = stop
        self._step = step
    @property
    def obj(self):
        """
        Imitate CompiledObject.obj behavior and return a ``builtin.slice()``
        object.
        """
        def get(element):
            if element is None:
                return None
            result = self._context.eval_node(element)
            if len(result) != 1:
                # For simplicity, we want slices to be clear defined with just
                # one type. Otherwise we will return an empty slice object.
                raise IndexError
            try:
                return list(result)[0].obj
            except AttributeError:
                return None
        try:
            return slice(get(self._start), get(self._stop), get(self._step))
        except IndexError:
            # Ambiguous bounds: fall back to a full slice.
            return slice(None, None, None)
def create_index_types(evaluator, context, index):
    """
    Handles slices in subscript nodes.

    Returns a set of Slice contexts for slice syntax, or the evaluated index
    contexts for a plain subscript.
    """
    if index == ':':
        # Like array[:]
        return set([Slice(context, None, None, None)])
    elif index.type == 'subscript':  # subscript is a slice operation.
        # Like array[:3]
        result = []
        for el in index.children:
            if el == ':':
                # A leading ':' means the start is omitted.
                if not result:
                    result.append(None)
            elif el.type == 'sliceop':
                if len(el.children) == 2:
                    result.append(el.children[1])
            else:
                result.append(el)
        # Pad to exactly (start, stop, step).
        result += [None] * (3 - len(result))
        return set([Slice(context, *result)])
    # No slices
    return context.eval_node(index)
| 35.256439 | 99 | 0.621427 | from jedi import debug
from jedi import settings
from jedi import common
from jedi.common import unite, safe_property
from jedi._compatibility import unicode, zip_longest, is_py3
from jedi.evaluate import compiled
from jedi.evaluate import helpers
from jedi.evaluate import analysis
from jedi.evaluate import pep0484
from jedi.evaluate import context
from jedi.evaluate import precedence
from jedi.evaluate import recursion
from jedi.evaluate.cache import memoize_default
from jedi.evaluate.filters import DictFilter, AbstractNameDefinition, \
ParserTreeFilter
class AbstractSequence(context.Context):
    """Base class for all sequence-like contexts (list/set/tuple/dict)."""
    builtin_methods = {}
    api_type = 'instance'
    def __init__(self, evaluator):
        # Sequences always live in the builtins module.
        super(AbstractSequence, self).__init__(evaluator, evaluator.BUILTINS)
    def get_filters(self, search_global, until_position=None, origin_scope=None):
        # Subclasses must provide name lookup for their own methods.
        raise NotImplementedError
    @property
    def name(self):
        # The context's name is simply the builtin type name, e.g. 'list'.
        return compiled.CompiledContextName(self, self.array_type)
class BuiltinMethod(object):
    """``Generator.__next__`` ``dict.values`` methods and so on: calling it
    runs a local implementation, attribute access proxies to the builtin."""
    def __init__(self, builtin_context, method, builtin_func):
        self._builtin_context = builtin_context
        self._method = method
        self._builtin_func = builtin_func
    def py__call__(self, params):
        # ``params`` is ignored; the method only needs its bound context.
        return self._method(self._builtin_context)
    def __getattr__(self, name):
        return getattr(self._builtin_func, name)
class SpecialMethodFilter(DictFilter):
    """A filter for methods defined in this module on the corresponding
    classes like Generator (for __next__, etc)."""
    class SpecialMethodName(AbstractNameDefinition):
        api_type = 'function'
        def __init__(self, parent_context, string_name, callable_, builtin_context):
            self.parent_context = parent_context
            self.string_name = string_name
            self._callable = callable_
            self._builtin_context = builtin_context
        def infer(self):
            filter = next(self._builtin_context.get_filters())
            # We can take the first index; on builtin methods there's
            # always only going to be one name. The same is true for the
            # inferred values.
            builtin_func = next(iter(filter.get(self.string_name)[0].infer()))
            return set([BuiltinMethod(self.parent_context, self._callable, builtin_func)])
    def __init__(self, context, dct, builtin_context):
        super(SpecialMethodFilter, self).__init__(dct)
        self.context = context
        # Used to introspect names; self.context executes the function.
        self._builtin_context = builtin_context
    def _convert(self, name, value):
        return self.SpecialMethodName(self.context, name, value, self._builtin_context)
def has_builtin_methods(cls):
    """Class decorator that collects registered builtin methods on *cls*.

    Builds ``cls.builtin_methods`` by merging the ``builtin_methods`` of all
    base classes (least to most specific) and then adding every member of
    the class that was tagged by ``register_builtin_method``.
    """
    # Inherited builtin methods must survive even when a subclass does not
    # mention them again, so start from the bases.
    collected = {}
    for base in reversed(cls.__bases__):
        collected.update(getattr(base, 'builtin_methods', {}))
    cls.builtin_methods = collected
    for member in cls.__dict__.values():
        registered = getattr(member, 'registered_builtin_methods', None)
        if registered is not None:
            cls.builtin_methods.update(registered)
    return cls
def register_builtin_method(method_name, python_version_match=None):
    """Decorator factory tagging a function as a builtin-method imitation.

    The tag is stored in ``func.registered_builtin_methods`` and later
    collected by the ``has_builtin_methods`` class decorator.  When
    ``python_version_match`` is given and does not match the running
    interpreter's major version, the function is returned untouched.
    """
    def decorator(func):
        if python_version_match and python_version_match != 2 + int(is_py3):
            # Not meant for the running interpreter version; skip tagging.
            return func
        registry = func.__dict__.setdefault('registered_builtin_methods', {})
        registry[method_name] = func
        return func
    return decorator
@has_builtin_methods
class GeneratorMixin(object):
    """Behavior shared by Generator objects and generator comprehensions."""
    array_type = None
    # send/next/__next__ all evaluate to the union of the yielded values.
    @register_builtin_method('send')
    @register_builtin_method('next', python_version_match=2)
    @register_builtin_method('__next__', python_version_match=3)
    def py__next__(self):
        # TODO add TypeError if params are given.
        return unite(lazy_context.infer() for lazy_context in self.py__iter__())
    def get_filters(self, search_global, until_position=None, origin_scope=None):
        gen_obj = compiled.get_special_object(self.evaluator, 'GENERATOR_OBJECT')
        # Our imitated methods shadow the compiled generator's ones.
        yield SpecialMethodFilter(self, self.builtin_methods, gen_obj)
        for filter in gen_obj.get_filters(search_global):
            yield filter
    def py__bool__(self):
        # A generator object is always truthy.
        return True
    def py__class__(self):
        gen_obj = compiled.get_special_object(self.evaluator, 'GENERATOR_OBJECT')
        return gen_obj.py__class__()
    @property
    def name(self):
        return compiled.CompiledContextName(self, 'generator')
class Generator(GeneratorMixin, context.Context):
    """Context for the generator object returned by a ``yield`` function."""
    def __init__(self, evaluator, func_execution_context):
        super(Generator, self).__init__(evaluator, parent_context=evaluator.BUILTINS)
        self._func_execution_context = func_execution_context
    def py__iter__(self):
        # Iterating the generator means collecting the function's yields.
        return self._func_execution_context.get_yield_values()
    def __repr__(self):
        return "<%s of %s>" % (type(self).__name__, self._func_execution_context)
class CompForContext(context.TreeContext):
    """Context for one ``for`` clause of a comprehension (own name scope)."""
    @classmethod
    def from_comp_for(cls, parent_context, comp_for):
        # Alternate constructor straight from a comp_for tree node.
        return cls(parent_context.evaluator, parent_context, comp_for)
    def __init__(self, evaluator, parent_context, comp_for):
        super(CompForContext, self).__init__(evaluator, parent_context)
        self.tree_node = comp_for
    def get_node(self):
        return self.tree_node
    def get_filters(self, search_global, until_position=None, origin_scope=None):
        yield ParserTreeFilter(self.evaluator, self)
class Comprehension(AbstractSequence):
    """Base class for list/set/dict/generator comprehension contexts."""
    @staticmethod
    def from_atom(evaluator, context, atom):
        # Dispatch on the opening bracket of the atom.
        bracket = atom.children[0]
        if bracket == '{':
            # `{k: v ...}` is a dict comprehension, `{x ...}` a set one.
            if atom.children[1].children[1] == ':':
                cls = DictComprehension
            else:
                cls = SetComprehension
        elif bracket == '(':
            cls = GeneratorComprehension
        elif bracket == '[':
            cls = ListComprehension
        return cls(evaluator, context, atom)
    def __init__(self, evaluator, defining_context, atom):
        super(Comprehension, self).__init__(evaluator)
        self._defining_context = defining_context
        self._atom = atom
    def _get_comprehension(self):
        # The atom contains a testlist_comp
        return self._atom.children[1]
    def _get_comp_for(self):
        # The atom contains a testlist_comp
        return self._get_comprehension().children[1]
    def _eval_node(self, index=0):
        # The result expression(s) of the comprehension, e.g. `x + 1` in
        # `[x + 1 for x in foo]` (index 2 is the value part for dicts).
        return self._get_comprehension().children[index]
    @memoize_default()
    def _get_comp_for_context(self, parent_context, comp_for):
        # TODO shouldn't this be part of create_context?
        return CompForContext.from_comp_for(parent_context, comp_for)
    def _nested(self, comp_fors, parent_context=None):
        # Recursively evaluates one `for` clause per call.
        evaluator = self.evaluator
        comp_for = comp_fors[0]
        input_node = comp_for.children[3]
        parent_context = parent_context or self._defining_context
        input_types = parent_context.eval_node(input_node)
        iterated = py__iter__(evaluator, input_types, input_node)
        exprlist = comp_for.children[1]
        for i, lazy_context in enumerate(iterated):
            types = lazy_context.infer()
            dct = unpack_tuple_to_dict(evaluator, types, exprlist)
            context = self._get_comp_for_context(
                parent_context,
                comp_for,
            )
            with helpers.predefine_names(context, comp_for, dct):
                try:
                    for result in self._nested(comp_fors[1:], context):
                        yield result
                except IndexError:
                    # comp_fors[1:] was empty -> innermost clause reached;
                    # evaluate the comprehension's result expression(s).
                    iterated = context.eval_node(self._eval_node())
                    if self.array_type == 'dict':
                        # Dict comprehensions yield (keys, values) pairs.
                        yield iterated, context.eval_node(self._eval_node(2))
                    else:
                        yield iterated
    @memoize_default(default=[])
    @common.to_list
    def _iterate(self):
        comp_fors = tuple(self._get_comp_for().get_comp_fors())
        for result in self._nested(comp_fors):
            yield result
    def py__iter__(self):
        for set_ in self._iterate():
            yield context.LazyKnownContexts(set_)
    def __repr__(self):
        return "<%s of %s>" % (type(self).__name__, self._atom)
class ArrayMixin(object):
    """Filter/class behavior shared by all array-like contexts."""
    def get_filters(self, search_global, until_position=None, origin_scope=None):
        # Resolve the corresponding builtin class (list/set/dict/...).
        compiled_obj = compiled.builtin_from_name(self.evaluator, self.array_type)
        # Imitated methods registered in this module shadow the real ones.
        yield SpecialMethodFilter(self, self.builtin_methods, compiled_obj)
        for typ in compiled_obj.execute_evaluated(self):
            for filter in typ.get_filters():
                yield filter
    def py__bool__(self):
        return None  # The contents are unknown, so the truthiness is too.
    def py__class__(self):
        return compiled.builtin_from_name(self.evaluator, self.array_type)
    @safe_property
    def parent(self):
        return self.evaluator.BUILTINS
    def dict_values(self):
        # The union of all value types (dicts only; uses self._items pairs).
        return unite(self._defining_context.eval_node(v) for k, v in self._items())
class ListComprehension(ArrayMixin, Comprehension):
    """A list comprehension context, e.g. ``[x for x in y]``."""
    array_type = 'list'
    def py__getitem__(self, index):
        # Slicing a list comprehension gives back the same comprehension.
        if isinstance(index, slice):
            return set([self])
        # May raise IndexError; callers of py__getitem__ handle that.
        all_types = list(self.py__iter__())
        return all_types[index].infer()
class SetComprehension(ArrayMixin, Comprehension):
    """A set comprehension context, e.g. ``{x for x in y}``."""
    array_type = 'set'
@has_builtin_methods
class DictComprehension(ArrayMixin, Comprehension):
    """A dict comprehension context, e.g. ``{k: v for k, v in y}``."""
    array_type = 'dict'

    def _get_comp_for(self):
        # For dicts the comp_for node sits after `key : value`.
        return self._get_comprehension().children[3]

    def py__iter__(self):
        # Iterating a dict yields its keys.
        for keys, values in self._iterate():
            yield context.LazyKnownContexts(keys)

    def py__getitem__(self, index):
        for keys, values in self._iterate():
            for k in keys:
                if isinstance(k, compiled.CompiledObject):
                    if k.obj == index:
                        return values
        # Unknown key: fall back to all possible values.
        return self.dict_values()

    def dict_values(self):
        return unite(values for keys, values in self._iterate())

    @register_builtin_method('values')
    def _imitate_values(self):
        # Fake `dict.values()`: a list containing all value types.
        lazy_context = context.LazyKnownContexts(self.dict_values())
        return set([FakeSequence(self.evaluator, 'list', [lazy_context])])

    @register_builtin_method('items')
    def _imitate_items(self):
        # Fake `dict.items()`: a list of (key, value) tuples.
        # BUG FIX: the previous version was missing the comma after 'tuple'
        # (so it tried to *call* the string 'tuple') and returned via the
        # undefined name `create_evaluated_sequence_set`.  This now mirrors
        # DictLiteralContext._imitate_items.
        lazy_contexts = [
            context.LazyKnownContext(FakeSequence(
                self.evaluator, 'tuple',
                (context.LazyKnownContexts(keys),
                 context.LazyKnownContexts(values))
            )) for keys, values in self._iterate()
        ]
        return set([FakeSequence(self.evaluator, 'list', lazy_contexts)])
class GeneratorComprehension(GeneratorMixin, Comprehension):
    """A generator expression, e.g. ``(x for x in y)``."""
    pass
class SequenceLiteralContext(ArrayMixin, AbstractSequence):
    """Context for a sequence literal in the source: ``()``, ``[]``, ``{}``
    or an unparenthesized testlist (``a, b``)."""
    # Maps the opening bracket to the builtin type name.
    mapping = {'(': 'tuple',
               '[': 'list',
               '{': 'set'}
    def __init__(self, evaluator, defining_context, atom):
        super(SequenceLiteralContext, self).__init__(evaluator)
        self.atom = atom
        self._defining_context = defining_context
        if self.atom.type in ('testlist_star_expr', 'testlist'):
            # Unparenthesized tuples like `a, b = ...`.
            self.array_type = 'tuple'
        else:
            self.array_type = SequenceLiteralContext.mapping[atom.children[0]]
            """The builtin name of the array (list, set, tuple or dict)."""
    def py__getitem__(self, index):
        """Look up one index/key; may raise KeyError (dict) / IndexError."""
        if self.array_type == 'dict':
            for key, value in self._items():
                for k in self._defining_context.eval_node(key):
                    if isinstance(k, compiled.CompiledObject) \
                            and index == k.obj:
                        return self._defining_context.eval_node(value)
            raise KeyError('No key found in dictionary %s.' % self)
        # Can raise an IndexError
        if isinstance(index, slice):
            return set([self])
        else:
            return self._defining_context.eval_node(self._items()[index])
    def py__iter__(self):
        """Yield a lazy context per element (keys for dicts), plus any
        dynamically added values found by check_array_additions."""
        if self.array_type == 'dict':
            # Get keys.
            types = set()
            for k, _ in self._items():
                types |= self._defining_context.eval_node(k)
            # We don't know which dict index comes first, therefore always
            # yield the full union of key types for every entry.
            for _ in types:
                yield context.LazyKnownContexts(types)
        else:
            for node in self._items():
                yield context.LazyTreeContext(self._defining_context, node)
            for addition in check_array_additions(self._defining_context, self):
                yield addition
    def _values(self):
        # All value nodes; for dicts the keys are dropped.
        if self.array_type == 'dict':
            return unite(v for k, v in self._items())
        else:
            return self._items()
    def _items(self):
        # The element nodes; for dicts a list of (key, value) node pairs,
        # for sets possibly bare key nodes.
        c = self.atom.children
        if self.atom.type in ('testlist_star_expr', 'testlist'):
            return c[::2]
        array_node = c[1]
        if array_node in (']', '}', ')'):
            return []  # Direct closing bracket: empty literal.
        if array_node.type == 'testlist_comp':
            return array_node.children[::2]
        elif array_node.type == 'dictorsetmaker':
            kv = []
            iterator = iter(array_node.children)
            for key in iterator:
                op = next(iterator, None)
                if op is None or op == ',':
                    kv.append(key)  # A set.
                else:
                    assert op == ':'  # A dict.
                    kv.append((key, next(iterator)))
                    next(iterator, None)  # Possible comma.
            return kv
        else:
            # A single entry, e.g. `[x]`.
            return [array_node]
    def exact_key_items(self):
        """Yield (python string, lazy value context) pairs for all keys that
        evaluate to exactly known strings."""
        for key_node, value in self._items():
            for key in self._defining_context.eval_node(key_node):
                if precedence.is_string(key):
                    yield key.obj, context.LazyTreeContext(self._defining_context, value)
    def __repr__(self):
        return "<%s of %s>" % (self.__class__.__name__, self.atom)
@has_builtin_methods
class DictLiteralContext(SequenceLiteralContext):
    """Context for a dict literal, e.g. ``{'a': 1}``."""
    array_type = 'dict'
    def __init__(self, evaluator, defining_context, atom):
        # Intentionally skips SequenceLiteralContext.__init__: array_type is
        # fixed to 'dict', so no bracket inspection is needed.
        super(SequenceLiteralContext, self).__init__(evaluator)
        self._defining_context = defining_context
        self.atom = atom
    @register_builtin_method('values')
    def _imitate_values(self):
        # Fake `dict.values()`: a list containing all value types.
        lazy_context = context.LazyKnownContexts(self.dict_values())
        return set([FakeSequence(self.evaluator, 'list', [lazy_context])])
    @register_builtin_method('items')
    def _imitate_items(self):
        # Fake `dict.items()`: a list of (key, value) tuples.
        lazy_contexts = [
            context.LazyKnownContext(FakeSequence(
                self.evaluator, 'tuple',
                (context.LazyTreeContext(self._defining_context, key_node),
                 context.LazyTreeContext(self._defining_context, value_node))
            )) for key_node, value_node in self._items()
        ]
        return set([FakeSequence(self.evaluator, 'list', lazy_contexts)])
class _FakeArray(SequenceLiteralContext):
    """Base for arrays that have no real ``atom`` node in the parse tree."""
    def __init__(self, evaluator, container, type):
        # Deliberately skips SequenceLiteralContext.__init__: there is no
        # atom to inspect, the array_type is given explicitly.
        super(SequenceLiteralContext, self).__init__(evaluator)
        self.array_type = type
        self.atom = container
# TODO is this class really needed?
class ImplicitTuple(_FakeArray):
    """Tuple without brackets; currently disabled (raises on construction)."""
    def __init__(self, evaluator, testlist):
        super(ImplicitTuple, self).__init__(evaluator, testlist, 'tuple')
        raise NotImplementedError
        # NOTE(review): unreachable due to the raise above; kept as a sketch
        # of the intended implementation.
        self._testlist = testlist
    def _items(self):
        return self._testlist.children[::2]
class FakeSequence(_FakeArray):
    """Array built from already-known lazy contexts instead of tree nodes."""
    def __init__(self, evaluator, array_type, lazy_context_list):
        """array_type is the builtin type name, e.g. 'list' or 'tuple'."""
        super(FakeSequence, self).__init__(evaluator, None, array_type)
        self._lazy_context_list = lazy_context_list
    def _items(self):
        # Dead code path; the raise makes any remaining caller fail loudly.
        raise DeprecationWarning
        return self._context_list
    def py__getitem__(self, index):
        # May raise IndexError, which callers of py__getitem__ handle.
        return set(self._lazy_context_list[index].infer())
    def py__iter__(self):
        return self._lazy_context_list
    def __repr__(self):
        return "<%s of %s>" % (type(self).__name__, self._lazy_context_list)
class FakeDict(_FakeArray):
    """Dict backed by a plain python dict of ``{key: lazy context}``."""
    def __init__(self, evaluator, dct):
        super(FakeDict, self).__init__(evaluator, dct, 'dict')
        self._dct = dct
    def py__iter__(self):
        # Iterating yields the (compiled) keys, like a real dict.
        for key in self._dct:
            yield context.LazyKnownContext(compiled.create(self.evaluator, key))
    def py__getitem__(self, index):
        # May raise KeyError; callers of py__getitem__ handle that.
        return self._dct[index].infer()
    def dict_values(self):
        return unite(lazy_context.infer() for lazy_context in self._dct.values())
    def _items(self):
        # Dead code path; the raise makes any remaining caller fail loudly.
        raise DeprecationWarning
        for key, values in self._dct.items():
            # TODO this is not proper. The values could be multiple values?!
            yield key, values[0]
    def exact_key_items(self):
        return self._dct.items()
class MergedArray(_FakeArray):
    """Presents several array contexts as one single sequence."""
    def __init__(self, evaluator, arrays):
        # The merged array adopts the type of the last input array.
        super(MergedArray, self).__init__(evaluator, arrays, arrays[-1].array_type)
        self._arrays = arrays

    def py__iter__(self):
        for sub_array in self._arrays:
            for lazy_context in sub_array.py__iter__():
                yield lazy_context

    def py__getitem__(self, index):
        # An index may refer to any of the merged arrays, so return the
        # union of everything they contain.
        return unite(lazy_context.infer() for lazy_context in self.py__iter__())

    def _items(self):
        for sub_array in self._arrays:
            for item in sub_array._items():
                yield item

    def __len__(self):
        return sum(len(sub_array) for sub_array in self._arrays)
def unpack_tuple_to_dict(evaluator, types, exprlist):
    """Unpack an assignment target into a ``{name string: types}`` dict."""
    if exprlist.type == 'name':
        return {exprlist.value: types}
    elif exprlist.type == 'atom' and exprlist.children[0] in '([':
        # Parenthesized/bracketed target: recurse into the contents.
        return unpack_tuple_to_dict(evaluator, types, exprlist.children[1])
    elif exprlist.type in ('testlist', 'testlist_comp', 'exprlist',
                           'testlist_star_expr'):
        dct = {}
        parts = iter(exprlist.children[::2])
        n = 0
        for lazy_context in py__iter__(evaluator, types, exprlist):
            n += 1
            try:
                part = next(parts)
            except StopIteration:
                # TODO this context is probably not right.
                # NOTE(review): `part` may be unbound here when `parts` is
                # exhausted on the very first iteration -- verify.
                analysis.add(next(iter(types)), 'value-error-too-many-values', part,
                             message="ValueError: too many values to unpack (expected %s)" % n)
            else:
                dct.update(unpack_tuple_to_dict(evaluator, lazy_context.infer(), part))
        has_parts = next(parts, None)
        if types and has_parts is not None:
            # TODO this context is probably not right.
            analysis.add(next(iter(types)), 'value-error-too-few-values', has_parts,
                         message="ValueError: need more than %s values to unpack" % n)
        return dct
    elif exprlist.type == 'power' or exprlist.type == 'atom_expr':
        # Something like ``arr[x], var = ...``.
        # This is something that is not yet supported, would also be difficult
        # to write into a dict.
        return {}
    elif exprlist.type == 'star_expr':  # `a, *b, c = x` type unpackings
        # Currently we're not supporting them.
        return {}
    raise NotImplementedError
def py__iter__(evaluator, types, node=None):
    """Iterate over all contexts in ``types`` in lockstep.

    Yields one merged lazy context per position (``zip_longest`` over the
    individual iterators).  Non-iterable types are reported via analysis
    (when ``node`` is given) and otherwise skipped.
    """
    debug.dbg('py__iter__')
    type_iters = []
    for typ in types:
        try:
            iter_method = typ.py__iter__
        except AttributeError:
            if node is not None:
                analysis.add(typ, 'type-error-not-iterable', node,
                             message="TypeError: '%s' object is not iterable" % typ)
        else:
            type_iters.append(iter_method())
    for lazy_contexts in zip_longest(*type_iters):
        # Shorter iterators produce None fillers; drop them before merging.
        yield context.get_merged_lazy_context(
            [l for l in lazy_contexts if l is not None]
        )
def py__iter__types(evaluator, types, node=None):
    """Infer and union every value produced by iterating ``types``."""
    lazy_contexts = py__iter__(evaluator, types, node)
    return unite(lazy_context.infer() for lazy_context in lazy_contexts)
def py__getitem__(evaluator, context, types, trailer):
    """Evaluate a subscription ``types[...]`` for every type in ``types``."""
    from jedi.evaluate.representation import ClassContext
    from jedi.evaluate.instance import TreeInstance
    result = set()
    trailer_op, node, trailer_cl = trailer.children
    assert trailer_op == "["
    assert trailer_cl == "]"
    # typing (pep 0484) subscriptions like `List[int]` are handled first and
    # removed from the normal lookup below.
    for typ in list(types):
        if isinstance(typ, (ClassContext, TreeInstance)):
            typing_module_types = pep0484.py__getitem__(context, typ, node)
            if typing_module_types is not None:
                types.remove(typ)
                result |= typing_module_types
    if not types:
        # Everything was consumed by the typing handling above.
        return result
    for index in create_index_types(evaluator, context, node):
        if isinstance(index, (compiled.CompiledObject, Slice)):
            index = index.obj
        if type(index) not in (float, int, str, unicode, slice):
            # The index is not exactly known: fall back to all possible
            # values of every type.
            for typ in list(types):
                if isinstance(typ, AbstractSequence) and typ.array_type == 'dict':
                    types.remove(typ)
                    result |= typ.dict_values()
            return result | py__iter__types(evaluator, types)
        for typ in types:
            try:
                getitem = typ.py__getitem__
            except AttributeError:
                analysis.add(context, 'type-error-not-subscriptable', trailer_op,
                             message="TypeError: '%s' object is not subscriptable" % typ)
            else:
                try:
                    result |= getitem(index)
                except IndexError:
                    # Out of range: fall back to all iterated values.
                    result |= py__iter__types(evaluator, set([typ]))
                except KeyError:
                    # Unknown dict key: fall back to all dict values.
                    result |= typ.dict_values()
    return result
def check_array_additions(context, sequence):
    """Find values dynamically added to ``sequence`` (append/add/...)."""
    # Only mutable sequence types can be extended dynamically.
    # TODO also check for dict updates
    if sequence.array_type in ('list', 'set'):
        return _check_array_additions(context, sequence)
    return set()
@memoize_default(default=set())
@debug.increase_indent
def _check_array_additions(context, sequence):
    """Search the module for add-like calls (append/extend/insert for
    lists, add/update for sets) on ``sequence`` and return the lazy
    contexts of the added values."""
    from jedi.evaluate import param
    debug.dbg('Dynamic array search for %s' % sequence, color='MAGENTA')
    module_context = context.get_root_context()
    if not settings.dynamic_array_additions or isinstance(module_context, compiled.CompiledObject):
        debug.dbg('Dynamic array search aborted.', color='MAGENTA')
        return set()
    def find_additions(context, arglist, add_name):
        # Evaluate the arguments of a single add/append/... call.
        params = list(param.TreeArguments(context.evaluator, context, arglist).unpack())
        result = set()
        if add_name in ['insert']:
            # insert(index, object): skip the index argument.
            params = params[1:]
        if add_name in ['append', 'add', 'insert']:
            for key, lazy_context in params:
                result.add(lazy_context)
        elif add_name in ['extend', 'update']:
            # These take iterables, so the contained values are added.
            for key, lazy_context in params:
                result |= set(py__iter__(context.evaluator, lazy_context.infer()))
        return result
    # Temporarily disable dynamic params to avoid recursion blow-ups.
    temp_param_add, settings.dynamic_params_for_other_modules = \
        settings.dynamic_params_for_other_modules, False
    is_list = sequence.name.string_name == 'list'
    search_names = (['append', 'extend', 'insert'] if is_list else ['add', 'update'])
    added_types = set()
    for add_name in search_names:
        try:
            possible_names = module_context.tree_node.used_names[add_name]
        except KeyError:
            continue
        else:
            for name in possible_names:
                context_node = context.tree_node
                if not (context_node.start_pos < name.start_pos < context_node.end_pos):
                    # Only consider usages within the current context.
                    continue
                trailer = name.parent
                power = trailer.parent
                trailer_pos = power.children.index(trailer)
                try:
                    execution_trailer = power.children[trailer_pos + 1]
                except IndexError:
                    continue
                else:
                    if execution_trailer.type != 'trailer' \
                            or execution_trailer.children[0] != '(' \
                            or execution_trailer.children[1] == ')':
                        # Not a call with arguments -> nothing can be added.
                        continue
                random_context = context.create_context(name)
                with recursion.execution_allowed(context.evaluator, power) as allowed:
                    if allowed:
                        found = helpers.evaluate_call_of_leaf(
                            random_context,
                            name,
                            cut_own_trailer=True
                        )
                        if sequence in found:
                            # The arrays match. Now add the results
                            added_types |= find_additions(
                                random_context,
                                execution_trailer.children[1],
                                add_name
                            )
    # reset settings
    settings.dynamic_params_for_other_modules = temp_param_add
    debug.dbg('Dynamic array result %s' % added_types, color='MAGENTA')
    return added_types
def get_dynamic_array_instance(instance):
    """Return var args for an array instance, including dynamic additions."""
    if not settings.dynamic_array_additions:
        # Dynamic search is disabled: hand back the plain arguments.
        return instance.var_args
    from jedi.evaluate import param
    return param.ValuesArguments([[_ArrayInstance(instance)]])
class _ArrayInstance(object):
    """Pretends to be iterable over the argument an instance (e.g. a
    ``list(...)``/``set(...)`` call) was constructed from, plus any values
    added dynamically afterwards.  Not a context itself; only provides
    ``py__iter__``."""
    def __init__(self, instance):
        self.instance = instance
        self.var_args = instance.var_args
    def py__iter__(self):
        var_args = self.var_args
        try:
            _, lazy_context = next(var_args.unpack())
        except StopIteration:
            # No constructor argument -> nothing to iterate initially.
            pass
        else:
            for lazy in py__iter__(self.instance.evaluator, lazy_context.infer()):
                yield lazy
        from jedi.evaluate import param
        if isinstance(var_args, param.TreeArguments):
            # Also yield values appended/added after construction.
            additions = _check_array_additions(var_args.context, self.instance)
            for addition in additions:
                yield addition
class Slice(context.Context):
    """Wraps the three slice expression nodes for lazy evaluation."""
    def __init__(self, context, start, stop, step):
        super(Slice, self).__init__(
            context.evaluator,
            parent_context=context.evaluator.BUILTINS
        )
        self._context = context
        # all of them are either a Precedence or None.
        self._start = start
        self._stop = stop
        self._step = step
    @property
    def obj(self):
        """Imitate CompiledObject.obj by returning a real ``slice``."""
        def get(element):
            if element is None:
                return None
            result = self._context.eval_node(element)
            if len(result) != 1:
                # For simplicity, we want slices to be clear defined with just
                # one type. Otherwise we will return an empty slice object.
                raise IndexError
            try:
                return list(result)[0].obj
            except AttributeError:
                # The evaluated part is not a compiled object.
                return None
        try:
            return slice(get(self._start), get(self._stop), get(self._step))
        except IndexError:
            # Ambiguous bounds: fall back to the full slice.
            return slice(None, None, None)
def create_index_types(evaluator, context, index):
    """Evaluate a subscript node into a set of index contexts/Slices."""
    if index == ':':
        # Like array[:]
        return set([Slice(context, None, None, None)])
    elif index.type == 'subscript':  # subscript is a slice operation.
        # Like array[:3]; collect the start/stop/step expression nodes.
        result = []
        for el in index.children:
            if el == ':':
                if not result:
                    # Leading colon means the start is omitted.
                    result.append(None)
            elif el.type == 'sliceop':
                if len(el.children) == 2:
                    result.append(el.children[1])
            else:
                result.append(el)
        # Pad missing parts so Slice always gets (start, stop, step).
        result += [None] * (3 - len(result))
        return set([Slice(context, *result)])
    # No slices
    return context.eval_node(index)
| true | true |
1c3ce2f6eba465ad99d04ca8825220d7da8fc18e | 117,313 | py | Python | sdk/cdn/azure-mgmt-cdn/azure/mgmt/cdn/models/_models.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2021-06-02T08:01:35.000Z | 2021-06-02T08:01:35.000Z | sdk/cdn/azure-mgmt-cdn/azure/mgmt/cdn/models/_models.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | null | null | null | sdk/cdn/azure-mgmt-cdn/azure/mgmt/cdn/models/_models.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class CacheExpirationActionParameters(Model):
    """Parameters for the delivery-rule cache expiration action.

    ``odatatype`` and ``cache_type`` are server-side constants.

    :param cache_behavior: Required. Caching behavior for the requests.
     Possible values: 'BypassCache', 'Override', 'SetIfMissing'.
    :type cache_behavior: str or ~azure.mgmt.cdn.models.CacheBehavior
    :param cache_duration: Duration for which the content needs to be
     cached, in [d.]hh:mm:ss format.
    :type cache_duration: str
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'cache_behavior': {'required': True},
        'cache_type': {'required': True, 'constant': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'cache_behavior': {'key': 'cacheBehavior', 'type': 'str'},
        'cache_type': {'key': 'cacheType', 'type': 'str'},
        'cache_duration': {'key': 'cacheDuration', 'type': 'str'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleCacheExpirationActionParameters"

    cache_type = "All"

    def __init__(self, **kwargs):
        super(CacheExpirationActionParameters, self).__init__(**kwargs)
        self.cache_behavior = kwargs.get('cache_behavior')
        self.cache_duration = kwargs.get('cache_duration')
class CacheKeyQueryStringActionParameters(Model):
    """Parameters for the cache-key query string action.

    ``odatatype`` is a server-side constant.

    :param query_string_behavior: Required. Caching behavior for the
     requests. Possible values: 'Include', 'IncludeAll', 'Exclude',
     'ExcludeAll'.
    :type query_string_behavior: str or
     ~azure.mgmt.cdn.models.QueryStringBehavior
    :param query_parameters: Comma-separated query parameters to include or
     exclude.
    :type query_parameters: str
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'query_string_behavior': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'query_string_behavior': {'key': 'queryStringBehavior', 'type': 'str'},
        'query_parameters': {'key': 'queryParameters', 'type': 'str'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleCacheKeyQueryStringBehaviorActionParameters"

    def __init__(self, **kwargs):
        super(CacheKeyQueryStringActionParameters, self).__init__(**kwargs)
        self.query_string_behavior = kwargs.get('query_string_behavior')
        self.query_parameters = kwargs.get('query_parameters')
class CdnCertificateSourceParameters(Model):
    """Parameters for securing a custom domain with a CDN managed
    certificate.

    ``odatatype`` is a server-side constant.

    :param certificate_type: Required. Type of certificate used. Possible
     values: 'Shared', 'Dedicated'.
    :type certificate_type: str or ~azure.mgmt.cdn.models.CertificateType
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'certificate_type': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'certificate_type': {'key': 'certificateType', 'type': 'str'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.CdnCertificateSourceParameters"

    def __init__(self, **kwargs):
        super(CdnCertificateSourceParameters, self).__init__(**kwargs)
        self.certificate_type = kwargs.get('certificate_type')
class CustomDomainHttpsParameters(Model):
    """Base JSON object with the properties to secure a custom domain.

    Polymorphic: the server discriminates on ``certificate_source``.
    Known sub-classes: CdnManagedHttpsParameters, UserManagedHttpsParameters.

    :param protocol_type: Required. TLS extension protocol used for secure
     delivery. Possible values: 'ServerNameIndication', 'IPBased'.
    :type protocol_type: str or ~azure.mgmt.cdn.models.ProtocolType
    :param minimum_tls_version: TLS protocol version used for HTTPS.
     Possible values: 'None', 'TLS10', 'TLS12'.
    :type minimum_tls_version: str or ~azure.mgmt.cdn.models.MinimumTlsVersion
    :param certificate_source: Required. Discriminator, filled by the server.
    :type certificate_source: str
    """

    _validation = {
        'protocol_type': {'required': True},
        'certificate_source': {'required': True},
    }

    _attribute_map = {
        'protocol_type': {'key': 'protocolType', 'type': 'str'},
        'minimum_tls_version': {'key': 'minimumTlsVersion', 'type': 'MinimumTlsVersion'},
        'certificate_source': {'key': 'certificateSource', 'type': 'str'},
    }

    _subtype_map = {
        'certificate_source': {'Cdn': 'CdnManagedHttpsParameters', 'AzureKeyVault': 'UserManagedHttpsParameters'}
    }

    def __init__(self, **kwargs):
        super(CustomDomainHttpsParameters, self).__init__(**kwargs)
        self.protocol_type = kwargs.get('protocol_type')
        self.minimum_tls_version = kwargs.get('minimum_tls_version')
        # Discriminator; set by the concrete subclass.
        self.certificate_source = None
class CdnManagedHttpsParameters(CustomDomainHttpsParameters):
    """Certificate source parameters using a CDN managed certificate for
    enabling SSL.

    :param protocol_type: Required. TLS extension protocol used for secure
     delivery. Possible values: 'ServerNameIndication', 'IPBased'.
    :type protocol_type: str or ~azure.mgmt.cdn.models.ProtocolType
    :param minimum_tls_version: TLS protocol version used for HTTPS.
     Possible values: 'None', 'TLS10', 'TLS12'.
    :type minimum_tls_version: str or ~azure.mgmt.cdn.models.MinimumTlsVersion
    :param certificate_source_parameters: Required. CDN managed certificate
     source parameters.
    :type certificate_source_parameters:
     ~azure.mgmt.cdn.models.CdnCertificateSourceParameters
    """

    _validation = {
        'protocol_type': {'required': True},
        'certificate_source': {'required': True},
        'certificate_source_parameters': {'required': True},
    }

    _attribute_map = {
        'protocol_type': {'key': 'protocolType', 'type': 'str'},
        'minimum_tls_version': {'key': 'minimumTlsVersion', 'type': 'MinimumTlsVersion'},
        'certificate_source': {'key': 'certificateSource', 'type': 'str'},
        'certificate_source_parameters': {'key': 'certificateSourceParameters', 'type': 'CdnCertificateSourceParameters'},
    }

    def __init__(self, **kwargs):
        super(CdnManagedHttpsParameters, self).__init__(**kwargs)
        self.certificate_source_parameters = kwargs.get('certificate_source_parameters')
        # Fixed discriminator value for this subclass.
        self.certificate_source = 'Cdn'
class CheckNameAvailabilityInput(Model):
    """Input of the CheckNameAvailability API.

    ``type`` is a server-side constant.

    :param name: Required. The resource name to validate.
    :type name: str
    """

    _validation = {
        'name': {'required': True},
        'type': {'required': True, 'constant': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    type = "Microsoft.Cdn/Profiles/Endpoints"

    def __init__(self, **kwargs):
        super(CheckNameAvailabilityInput, self).__init__(**kwargs)
        self.name = kwargs.get('name')
class CheckNameAvailabilityOutput(Model):
    """Output of the check name availability API.

    All fields are populated by the server and ignored on requests.

    :ivar name_available: Whether the name is available.
    :vartype name_available: bool
    :ivar reason: Why the name is not available.
    :vartype reason: str
    :ivar message: Detailed error message for an unavailable name.
    :vartype message: str
    """

    _validation = {
        'name_available': {'readonly': True},
        'reason': {'readonly': True},
        'message': {'readonly': True},
    }

    _attribute_map = {
        'name_available': {'key': 'nameAvailable', 'type': 'bool'},
        'reason': {'key': 'reason', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(CheckNameAvailabilityOutput, self).__init__(**kwargs)
        # Read-only: the service fills these in.
        self.name_available = None
        self.reason = None
        self.message = None
class CidrIpAddress(Model):
    """A CIDR IP address: base address plus prefix length.

    :param base_ip_address: The IP address itself.
    :type base_ip_address: str
    :param prefix_length: Length of the address prefix.
    :type prefix_length: int
    """

    _attribute_map = {
        'base_ip_address': {'key': 'baseIpAddress', 'type': 'str'},
        'prefix_length': {'key': 'prefixLength', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(CidrIpAddress, self).__init__(**kwargs)
        self.base_ip_address = kwargs.get('base_ip_address')
        self.prefix_length = kwargs.get('prefix_length')
class CloudError(Model):
    """An error response from the service (no structured fields)."""

    _attribute_map = {
    }
class CookiesMatchConditionParameters(Model):
    """Parameters for a Cookies delivery-rule match condition.

    ``odatatype`` is a server-side constant.

    :param selector: Required. Name of the cookie to be matched.
    :type selector: str
    :param operator: Required. Match operator. Possible values: 'Any',
     'Equal', 'Contains', 'BeginsWith', 'EndsWith', 'LessThan',
     'LessThanOrEqual', 'GreaterThan', 'GreaterThanOrEqual'.
    :type operator: str or ~azure.mgmt.cdn.models.CookiesOperator
    :param negate_condition: Whether this is a negated condition.
    :type negate_condition: bool
    :param match_values: Required. Values to match against.
    :type match_values: list[str]
    :param transforms: Transforms applied before matching.
    :type transforms: list[str or ~azure.mgmt.cdn.models.Transform]
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'selector': {'required': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'selector': {'key': 'selector', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleCookiesConditionParameters"

    def __init__(self, **kwargs):
        super(CookiesMatchConditionParameters, self).__init__(**kwargs)
        self.selector = kwargs.get('selector')
        self.operator = kwargs.get('operator')
        self.negate_condition = kwargs.get('negate_condition')
        self.match_values = kwargs.get('match_values')
        self.transforms = kwargs.get('transforms')
class Resource(Model):
    """The core properties shared by all ARM resources.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Resource, self).__init__(**kwargs)
        # All three fields are read-only and filled in by the service.
        self.id = None
        self.name = None
        self.type = None
class ProxyResource(Resource):
    """The resource model definition for an ARM proxy resource.

    A proxy resource has everything except the location and tags required
    of tracked resources.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # No fields beyond those inherited from Resource.
        super(ProxyResource, self).__init__(**kwargs)
class CustomDomain(ProxyResource):
    """Friendly domain name mapping to the endpoint hostname that the customer
    provides for branding purposes, e.g. www.contoso.com.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param host_name: Required. The host name of the custom domain. Must be a
     domain name.
    :type host_name: str
    :ivar resource_state: Resource status of the custom domain. Possible
     values include: 'Creating', 'Active', 'Deleting'
    :vartype resource_state: str or
     ~azure.mgmt.cdn.models.CustomDomainResourceState
    :ivar custom_https_provisioning_state: Provisioning status of Custom Https
     of the custom domain. Possible values include: 'Enabling', 'Enabled',
     'Disabling', 'Disabled', 'Failed'
    :vartype custom_https_provisioning_state: str or
     ~azure.mgmt.cdn.models.CustomHttpsProvisioningState
    :ivar custom_https_provisioning_substate: Provisioning substate shows the
     progress of custom HTTPS enabling/disabling process step by step. Possible
     values include: 'SubmittingDomainControlValidationRequest',
     'PendingDomainControlValidationREquestApproval',
     'DomainControlValidationRequestApproved',
     'DomainControlValidationRequestRejected',
     'DomainControlValidationRequestTimedOut', 'IssuingCertificate',
     'DeployingCertificate', 'CertificateDeployed', 'DeletingCertificate',
     'CertificateDeleted'
    :vartype custom_https_provisioning_substate: str or
     ~azure.mgmt.cdn.models.CustomHttpsProvisioningSubstate
    :param custom_https_parameters: Certificate parameters for securing custom
     HTTPS
    :type custom_https_parameters:
     ~azure.mgmt.cdn.models.CustomDomainHttpsParameters
    :param validation_data: Special validation or data may be required when
     delivering CDN to some regions due to local compliance reasons. E.g. ICP
     license number of a custom domain is required to deliver content in China.
    :type validation_data: str
    :ivar provisioning_state: Provisioning status of the custom domain.
    :vartype provisioning_state: str
    """
    # NOTE: _validation and _attribute_map are reflected on by the msrest
    # serializer; keys must match the attribute names set in __init__.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'host_name': {'required': True},
        'resource_state': {'readonly': True},
        'custom_https_provisioning_state': {'readonly': True},
        'custom_https_provisioning_substate': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
        'custom_https_provisioning_state': {'key': 'properties.customHttpsProvisioningState', 'type': 'str'},
        'custom_https_provisioning_substate': {'key': 'properties.customHttpsProvisioningSubstate', 'type': 'str'},
        'custom_https_parameters': {'key': 'properties.customHttpsParameters', 'type': 'CustomDomainHttpsParameters'},
        'validation_data': {'key': 'properties.validationData', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(CustomDomain, self).__init__(**kwargs)
        self.host_name = kwargs.get('host_name', None)
        # Read-only attributes below are initialized to None and populated
        # by the service on responses.
        self.resource_state = None
        self.custom_https_provisioning_state = None
        self.custom_https_provisioning_substate = None
        self.custom_https_parameters = kwargs.get('custom_https_parameters', None)
        self.validation_data = kwargs.get('validation_data', None)
        self.provisioning_state = None
class CustomDomainParameters(Model):
    """The customDomain JSON object required to create or update a custom
    domain.

    All required parameters must be populated in order to send to Azure.

    :param host_name: Required. The host name of the custom domain. Must be
     a domain name.
    :type host_name: str
    """

    _validation = {
        'host_name': {'required': True},
    }

    _attribute_map = {
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(CustomDomainParameters, self).__init__(**kwargs)
        self.host_name = kwargs.get('host_name')
class DeepCreatedOrigin(Model):
    """The main origin of CDN content, added when creating a CDN endpoint.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Origin name
    :type name: str
    :param host_name: Required. The address of the origin. It can be a
     domain name, IPv4 address, or IPv6 address.
    :type host_name: str
    :param http_port: The value of the HTTP port. Must be between 1 and
     65535
    :type http_port: int
    :param https_port: The value of the HTTPS port. Must be between 1 and
     65535
    :type https_port: int
    """

    _validation = {
        'name': {'required': True},
        'host_name': {'required': True},
        # Port numbers are constrained to the valid TCP range.
        'http_port': {'maximum': 65535, 'minimum': 1},
        'https_port': {'maximum': 65535, 'minimum': 1},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'http_port': {'key': 'properties.httpPort', 'type': 'int'},
        'https_port': {'key': 'properties.httpsPort', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(DeepCreatedOrigin, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.host_name = kwargs.get('host_name')
        self.http_port = kwargs.get('http_port')
        self.https_port = kwargs.get('https_port')
class DeliveryRule(Model):
    """A rule pairing a set of match conditions with the actions to run.

    All required parameters must be populated in order to send to Azure.

    :param name: Name of the rule
    :type name: str
    :param order: Required. The order in which the rules are applied for
     the endpoint. Possible values {0,1,2,3,.........}. A rule with a lesser
     order will be applied before a rule with a greater order. Rule with
     order 0 is a special rule. It does not require any condition and
     actions listed in it will always be applied.
    :type order: int
    :param conditions: A list of conditions that must be matched for the
     actions to be executed
    :type conditions: list[~azure.mgmt.cdn.models.DeliveryRuleCondition]
    :param actions: Required. A list of actions that are executed when all
     the conditions of a rule are satisfied.
    :type actions: list[~azure.mgmt.cdn.models.DeliveryRuleAction]
    """

    _validation = {
        'order': {'required': True},
        'actions': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'order': {'key': 'order', 'type': 'int'},
        'conditions': {'key': 'conditions', 'type': '[DeliveryRuleCondition]'},
        'actions': {'key': 'actions', 'type': '[DeliveryRuleAction]'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRule, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.order = kwargs.get('order')
        self.conditions = kwargs.get('conditions')
        self.actions = kwargs.get('actions')
class DeliveryRuleAction(Model):
    """Base type for an action of a delivery rule.

    You probably want to use the sub-classes and not this class directly.
    Known sub-classes are: UrlRedirectAction, UrlRewriteAction,
    DeliveryRuleRequestHeaderAction, DeliveryRuleResponseHeaderAction,
    DeliveryRuleCacheExpirationAction, DeliveryRuleCacheKeyQueryStringAction

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
    }

    # Maps the 'name' discriminator on the wire to the concrete subclass
    # that msrest should instantiate when deserializing.
    _subtype_map = {
        'name': {
            'UrlRedirect': 'UrlRedirectAction',
            'UrlRewrite': 'UrlRewriteAction',
            'ModifyRequestHeader': 'DeliveryRuleRequestHeaderAction',
            'ModifyResponseHeader': 'DeliveryRuleResponseHeaderAction',
            'CacheExpiration': 'DeliveryRuleCacheExpirationAction',
            'CacheKeyQueryString': 'DeliveryRuleCacheKeyQueryStringAction',
        }
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleAction, self).__init__(**kwargs)
        # Subclasses overwrite this with their discriminator value.
        self.name = None
class DeliveryRuleCacheExpirationAction(DeliveryRuleAction):
    """Delivery-rule action that controls cache expiration.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the action.
    :type parameters: ~azure.mgmt.cdn.models.CacheExpirationActionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'CacheExpirationActionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleCacheExpirationAction, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this action type on the wire.
        self.name = 'CacheExpiration'
class DeliveryRuleCacheKeyQueryStringAction(DeliveryRuleAction):
    """Delivery-rule action that controls the cache-key query string.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the action.
    :type parameters:
     ~azure.mgmt.cdn.models.CacheKeyQueryStringActionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'CacheKeyQueryStringActionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleCacheKeyQueryStringAction, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this action type on the wire.
        self.name = 'CacheKeyQueryString'
class DeliveryRuleCondition(Model):
    """Base type for a condition of a delivery rule.

    You probably want to use the sub-classes and not this class directly.
    Known sub-classes are: DeliveryRuleRemoteAddressCondition,
    DeliveryRuleRequestMethodCondition, DeliveryRuleQueryStringCondition,
    DeliveryRulePostArgsCondition, DeliveryRuleRequestUriCondition,
    DeliveryRuleRequestHeaderCondition, DeliveryRuleRequestBodyCondition,
    DeliveryRuleRequestSchemeCondition, DeliveryRuleUrlPathCondition,
    DeliveryRuleUrlFileExtensionCondition, DeliveryRuleUrlFileNameCondition,
    DeliveryRuleHttpVersionCondition, DeliveryRuleCookiesCondition,
    DeliveryRuleIsDeviceCondition

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
    }

    # Maps the 'name' discriminator on the wire to the concrete subclass
    # that msrest should instantiate when deserializing.
    _subtype_map = {
        'name': {
            'RemoteAddress': 'DeliveryRuleRemoteAddressCondition',
            'RequestMethod': 'DeliveryRuleRequestMethodCondition',
            'QueryString': 'DeliveryRuleQueryStringCondition',
            'PostArgs': 'DeliveryRulePostArgsCondition',
            'RequestUri': 'DeliveryRuleRequestUriCondition',
            'RequestHeader': 'DeliveryRuleRequestHeaderCondition',
            'RequestBody': 'DeliveryRuleRequestBodyCondition',
            'RequestScheme': 'DeliveryRuleRequestSchemeCondition',
            'UrlPath': 'DeliveryRuleUrlPathCondition',
            'UrlFileExtension': 'DeliveryRuleUrlFileExtensionCondition',
            'UrlFileName': 'DeliveryRuleUrlFileNameCondition',
            'HttpVersion': 'DeliveryRuleHttpVersionCondition',
            'Cookies': 'DeliveryRuleCookiesCondition',
            'IsDevice': 'DeliveryRuleIsDeviceCondition',
        }
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleCondition, self).__init__(**kwargs)
        # Subclasses overwrite this with their discriminator value.
        self.name = None
class DeliveryRuleCookiesCondition(DeliveryRuleCondition):
    """Delivery-rule condition that matches on request cookies.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the condition.
    :type parameters: ~azure.mgmt.cdn.models.CookiesMatchConditionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'CookiesMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleCookiesCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this condition type on the wire.
        self.name = 'Cookies'
class DeliveryRuleHttpVersionCondition(DeliveryRuleCondition):
    """Delivery-rule condition that matches on the HTTP version.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the condition.
    :type parameters:
     ~azure.mgmt.cdn.models.HttpVersionMatchConditionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'HttpVersionMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleHttpVersionCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this condition type on the wire.
        self.name = 'HttpVersion'
class DeliveryRuleIsDeviceCondition(DeliveryRuleCondition):
    """Delivery-rule condition that matches on the requesting device type.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the condition.
    :type parameters: ~azure.mgmt.cdn.models.IsDeviceMatchConditionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'IsDeviceMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleIsDeviceCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this condition type on the wire.
        self.name = 'IsDevice'
class DeliveryRulePostArgsCondition(DeliveryRuleCondition):
    """Delivery-rule condition that matches on POST arguments.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the condition.
    :type parameters: ~azure.mgmt.cdn.models.PostArgsMatchConditionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'PostArgsMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRulePostArgsCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this condition type on the wire.
        self.name = 'PostArgs'
class DeliveryRuleQueryStringCondition(DeliveryRuleCondition):
    """Delivery-rule condition that matches on the query string.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the condition.
    :type parameters:
     ~azure.mgmt.cdn.models.QueryStringMatchConditionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'QueryStringMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleQueryStringCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this condition type on the wire.
        self.name = 'QueryString'
class DeliveryRuleRemoteAddressCondition(DeliveryRuleCondition):
    """Delivery-rule condition that matches on the remote address.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the condition.
    :type parameters:
     ~azure.mgmt.cdn.models.RemoteAddressMatchConditionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'RemoteAddressMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleRemoteAddressCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this condition type on the wire.
        self.name = 'RemoteAddress'
class DeliveryRuleRequestBodyCondition(DeliveryRuleCondition):
    """Delivery-rule condition that matches on the request body.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the condition.
    :type parameters:
     ~azure.mgmt.cdn.models.RequestBodyMatchConditionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'RequestBodyMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleRequestBodyCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this condition type on the wire.
        self.name = 'RequestBody'
class DeliveryRuleRequestHeaderAction(DeliveryRuleAction):
    """Delivery-rule action that modifies a request header.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the action.
    :type parameters: ~azure.mgmt.cdn.models.HeaderActionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'HeaderActionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleRequestHeaderAction, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this action type on the wire.
        self.name = 'ModifyRequestHeader'
class DeliveryRuleRequestHeaderCondition(DeliveryRuleCondition):
    """Delivery-rule condition that matches on a request header.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the condition.
    :type parameters:
     ~azure.mgmt.cdn.models.RequestHeaderMatchConditionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'RequestHeaderMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleRequestHeaderCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this condition type on the wire.
        self.name = 'RequestHeader'
class DeliveryRuleRequestMethodCondition(DeliveryRuleCondition):
    """Delivery-rule condition that matches on the HTTP request method.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the condition.
    :type parameters:
     ~azure.mgmt.cdn.models.RequestMethodMatchConditionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'RequestMethodMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleRequestMethodCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this condition type on the wire.
        self.name = 'RequestMethod'
class DeliveryRuleRequestSchemeCondition(DeliveryRuleCondition):
    """Delivery-rule condition that matches on the request scheme.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the condition.
    :type parameters:
     ~azure.mgmt.cdn.models.RequestSchemeMatchConditionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'RequestSchemeMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleRequestSchemeCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this condition type on the wire.
        self.name = 'RequestScheme'
class DeliveryRuleRequestUriCondition(DeliveryRuleCondition):
    """Delivery-rule condition that matches on the request URI.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the condition.
    :type parameters:
     ~azure.mgmt.cdn.models.RequestUriMatchConditionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'RequestUriMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleRequestUriCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this condition type on the wire.
        self.name = 'RequestUri'
class DeliveryRuleResponseHeaderAction(DeliveryRuleAction):
    """Delivery-rule action that modifies a response header.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the action.
    :type parameters: ~azure.mgmt.cdn.models.HeaderActionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'HeaderActionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleResponseHeaderAction, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this action type on the wire.
        self.name = 'ModifyResponseHeader'
class DeliveryRuleUrlFileExtensionCondition(DeliveryRuleCondition):
    """Delivery-rule condition that matches on the URL file extension.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the condition.
    :type parameters:
     ~azure.mgmt.cdn.models.UrlFileExtensionMatchConditionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'UrlFileExtensionMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleUrlFileExtensionCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this condition type on the wire.
        self.name = 'UrlFileExtension'
class DeliveryRuleUrlFileNameCondition(DeliveryRuleCondition):
    """Delivery-rule condition that matches on the URL file name.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the condition.
    :type parameters:
     ~azure.mgmt.cdn.models.UrlFileNameMatchConditionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'UrlFileNameMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleUrlFileNameCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this condition type on the wire.
        self.name = 'UrlFileName'
class DeliveryRuleUrlPathCondition(DeliveryRuleCondition):
    """Delivery-rule condition that matches on the URL path.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Defines the parameters for the condition.
    :type parameters: ~azure.mgmt.cdn.models.UrlPathMatchConditionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'UrlPathMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleUrlPathCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Discriminator identifying this condition type on the wire.
        self.name = 'UrlPath'
class EdgeNode(ProxyResource):
    """A global Point of Presence (POP) location used to deliver CDN content
    to end users.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param ip_address_groups: Required. List of ip address groups.
    :type ip_address_groups: list[~azure.mgmt.cdn.models.IpAddressGroup]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'ip_address_groups': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'ip_address_groups': {'key': 'properties.ipAddressGroups', 'type': '[IpAddressGroup]'},
    }

    def __init__(self, **kwargs):
        super(EdgeNode, self).__init__(**kwargs)
        self.ip_address_groups = kwargs.get('ip_address_groups')
class TrackedResource(Resource):
    """The resource model definition for an ARM tracked top-level resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super(TrackedResource, self).__init__(**kwargs)
        self.location = kwargs.get('location')
        self.tags = kwargs.get('tags')
class Endpoint(TrackedResource):
"""CDN endpoint is the entity within a CDN profile containing configuration
information such as origin, protocol, content caching and delivery
behavior. The CDN endpoint uses the URL format
<endpointname>.azureedge.net.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Required. Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict[str, str]
:param origin_host_header: The host header value sent to the origin with
each request. If you leave this blank, the request hostname determines
this value. Azure CDN origins, such as Web Apps, Blob Storage, and Cloud
Services require this host header value to match the origin hostname by
default.
:type origin_host_header: str
:param origin_path: A directory path on the origin that CDN can use to
retrieve content from, e.g. contoso.cloudapp.net/originpath.
:type origin_path: str
:param content_types_to_compress: List of content types on which
compression applies. The value should be a valid MIME type.
:type content_types_to_compress: list[str]
:param is_compression_enabled: Indicates whether content compression is
enabled on CDN. Default value is false. If compression is enabled, content
will be served as compressed if user requests for a compressed version.
Content won't be compressed on CDN when requested content is smaller than
1 byte or larger than 1 MB.
:type is_compression_enabled: bool
:param is_http_allowed: Indicates whether HTTP traffic is allowed on the
endpoint. Default value is true. At least one protocol (HTTP or HTTPS)
must be allowed.
:type is_http_allowed: bool
:param is_https_allowed: Indicates whether HTTPS traffic is allowed on the
endpoint. Default value is true. At least one protocol (HTTP or HTTPS)
must be allowed.
:type is_https_allowed: bool
:param query_string_caching_behavior: Defines how CDN caches requests that
include query strings. You can ignore any query strings when caching,
bypass caching to prevent requests that contain query strings from being
cached, or cache every request with a unique URL. Possible values include:
'IgnoreQueryString', 'BypassCaching', 'UseQueryString', 'NotSet'
:type query_string_caching_behavior: str or
~azure.mgmt.cdn.models.QueryStringCachingBehavior
:param optimization_type: Specifies what scenario the customer wants this
CDN endpoint to optimize for, e.g. Download, Media services. With this
information, CDN can apply scenario driven optimization. Possible values
include: 'GeneralWebDelivery', 'GeneralMediaStreaming',
'VideoOnDemandMediaStreaming', 'LargeFileDownload',
'DynamicSiteAcceleration'
:type optimization_type: str or ~azure.mgmt.cdn.models.OptimizationType
:param probe_path: Path to a file hosted on the origin which helps
accelerate delivery of the dynamic content and calculate the most optimal
routes for the CDN. This is relative to the origin path.
:type probe_path: str
:param geo_filters: List of rules defining the user's geo access within a
CDN endpoint. Each geo filter defines an access rule to a specified path
or content, e.g. block APAC for path /pictures/
:type geo_filters: list[~azure.mgmt.cdn.models.GeoFilter]
:param delivery_policy: A policy that specifies the delivery rules to be
used for an endpoint.
:type delivery_policy:
~azure.mgmt.cdn.models.EndpointPropertiesUpdateParametersDeliveryPolicy
:ivar host_name: The host name of the endpoint structured as
{endpointName}.{DNSZone}, e.g. contoso.azureedge.net
:vartype host_name: str
:param origins: Required. The source of the content being delivered via
CDN.
:type origins: list[~azure.mgmt.cdn.models.DeepCreatedOrigin]
:ivar resource_state: Resource status of the endpoint. Possible values
include: 'Creating', 'Deleting', 'Running', 'Starting', 'Stopped',
'Stopping'
:vartype resource_state: str or
~azure.mgmt.cdn.models.EndpointResourceState
:ivar provisioning_state: Provisioning status of the endpoint.
:vartype provisioning_state: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'location': {'required': True},
'host_name': {'readonly': True},
'origins': {'required': True},
'resource_state': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'origin_host_header': {'key': 'properties.originHostHeader', 'type': 'str'},
'origin_path': {'key': 'properties.originPath', 'type': 'str'},
'content_types_to_compress': {'key': 'properties.contentTypesToCompress', 'type': '[str]'},
'is_compression_enabled': {'key': 'properties.isCompressionEnabled', 'type': 'bool'},
'is_http_allowed': {'key': 'properties.isHttpAllowed', 'type': 'bool'},
'is_https_allowed': {'key': 'properties.isHttpsAllowed', 'type': 'bool'},
'query_string_caching_behavior': {'key': 'properties.queryStringCachingBehavior', 'type': 'QueryStringCachingBehavior'},
'optimization_type': {'key': 'properties.optimizationType', 'type': 'str'},
'probe_path': {'key': 'properties.probePath', 'type': 'str'},
'geo_filters': {'key': 'properties.geoFilters', 'type': '[GeoFilter]'},
'delivery_policy': {'key': 'properties.deliveryPolicy', 'type': 'EndpointPropertiesUpdateParametersDeliveryPolicy'},
'host_name': {'key': 'properties.hostName', 'type': 'str'},
'origins': {'key': 'properties.origins', 'type': '[DeepCreatedOrigin]'},
'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(self, **kwargs):
    """Initialize the endpoint from keyword arguments.

    Server-populated fields (``host_name``, ``resource_state``,
    ``provisioning_state``) always start as ``None``.
    """
    super(Endpoint, self).__init__(**kwargs)
    # Caller-settable properties: absent keys default to None.
    for name in ('origin_host_header', 'origin_path',
                 'content_types_to_compress', 'is_compression_enabled',
                 'is_http_allowed', 'is_https_allowed',
                 'query_string_caching_behavior', 'optimization_type',
                 'probe_path', 'geo_filters', 'delivery_policy'):
        setattr(self, name, kwargs.get(name))
    self.host_name = None  # read-only; populated by the service
    self.origins = kwargs.get('origins')
    self.resource_state = None  # read-only; populated by the service
    self.provisioning_state = None  # read-only; populated by the service
class EndpointPropertiesUpdateParametersDeliveryPolicy(Model):
    """A policy that specifies the delivery rules to be used for an endpoint.

    All required parameters must be populated in order to send to Azure.

    :param description: User-friendly description of the policy.
    :type description: str
    :param rules: Required. A list of the delivery rules.
    :type rules: list[~azure.mgmt.cdn.models.DeliveryRule]
    """

    # Validation constraints keyed by attribute name.
    _validation = {
        'rules': {'required': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'description': {'key': 'description', 'type': 'str'},
        'rules': {'key': 'rules', 'type': '[DeliveryRule]'},
    }

    def __init__(self, **kwargs):
        super(EndpointPropertiesUpdateParametersDeliveryPolicy, self).__init__(**kwargs)
        # Absent keys default to None.
        self.description = kwargs.get('description')
        self.rules = kwargs.get('rules')
class EndpointUpdateParameters(Model):
    """Properties required to create or update an endpoint.

    :param tags: Endpoint tags.
    :type tags: dict[str, str]
    :param origin_host_header: The host header value sent to the origin with
     each request; when blank the request hostname determines this value.
    :type origin_host_header: str
    :param origin_path: A directory path on the origin that CDN can use to
     retrieve content from, e.g. contoso.cloudapp.net/originpath.
    :type origin_path: str
    :param content_types_to_compress: List of content (MIME) types on which
     compression applies.
    :type content_types_to_compress: list[str]
    :param is_compression_enabled: Whether content compression is enabled on
     CDN. Default value is false.
    :type is_compression_enabled: bool
    :param is_http_allowed: Whether HTTP traffic is allowed on the endpoint.
     Default value is true. At least one of HTTP/HTTPS must be allowed.
    :type is_http_allowed: bool
    :param is_https_allowed: Whether HTTPS traffic is allowed on the
     endpoint. Default value is true. At least one of HTTP/HTTPS must be
     allowed.
    :type is_https_allowed: bool
    :param query_string_caching_behavior: How CDN caches requests that
     include query strings. Possible values include: 'IgnoreQueryString',
     'BypassCaching', 'UseQueryString', 'NotSet'
    :type query_string_caching_behavior: str or
     ~azure.mgmt.cdn.models.QueryStringCachingBehavior
    :param optimization_type: Scenario the customer wants this CDN endpoint
     to optimize for. Possible values include: 'GeneralWebDelivery',
     'GeneralMediaStreaming', 'VideoOnDemandMediaStreaming',
     'LargeFileDownload', 'DynamicSiteAcceleration'
    :type optimization_type: str or ~azure.mgmt.cdn.models.OptimizationType
    :param probe_path: Path to a file hosted on the origin which helps
     accelerate delivery of dynamic content; relative to the origin path.
    :type probe_path: str
    :param geo_filters: Rules defining the user's geo access within a CDN
     endpoint.
    :type geo_filters: list[~azure.mgmt.cdn.models.GeoFilter]
    :param delivery_policy: A policy that specifies the delivery rules to be
     used for an endpoint.
    :type delivery_policy:
     ~azure.mgmt.cdn.models.EndpointPropertiesUpdateParametersDeliveryPolicy
    """

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'origin_host_header': {'key': 'properties.originHostHeader', 'type': 'str'},
        'origin_path': {'key': 'properties.originPath', 'type': 'str'},
        'content_types_to_compress': {'key': 'properties.contentTypesToCompress', 'type': '[str]'},
        'is_compression_enabled': {'key': 'properties.isCompressionEnabled', 'type': 'bool'},
        'is_http_allowed': {'key': 'properties.isHttpAllowed', 'type': 'bool'},
        'is_https_allowed': {'key': 'properties.isHttpsAllowed', 'type': 'bool'},
        'query_string_caching_behavior': {'key': 'properties.queryStringCachingBehavior', 'type': 'QueryStringCachingBehavior'},
        'optimization_type': {'key': 'properties.optimizationType', 'type': 'str'},
        'probe_path': {'key': 'properties.probePath', 'type': 'str'},
        'geo_filters': {'key': 'properties.geoFilters', 'type': '[GeoFilter]'},
        'delivery_policy': {'key': 'properties.deliveryPolicy', 'type': 'EndpointPropertiesUpdateParametersDeliveryPolicy'},
    }

    def __init__(self, **kwargs):
        super(EndpointUpdateParameters, self).__init__(**kwargs)
        # Every attribute is optional; absent keys default to None.
        for name in ('tags', 'origin_host_header', 'origin_path',
                     'content_types_to_compress', 'is_compression_enabled',
                     'is_http_allowed', 'is_https_allowed',
                     'query_string_caching_behavior', 'optimization_type',
                     'probe_path', 'geo_filters', 'delivery_policy'):
            setattr(self, name, kwargs.get(name))
class ErrorResponse(Model):
    """Error response indicating the CDN service could not process the
    incoming request; the reason is provided in the error message.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar code: Error code.
    :vartype code: str
    :ivar message: Error message indicating why the operation failed.
    :vartype message: str
    """

    # Both fields are read-only on the wire.
    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ErrorResponse, self).__init__(**kwargs)
        # Read-only fields; the service fills these in on deserialization.
        self.code = None
        self.message = None
class ErrorResponseException(HttpOperationError):
    """Server responded with exception of type: 'ErrorResponse'.

    :param deserialize: A deserializer
    :param response: Server response to be deserialized.
    """

    def __init__(self, deserialize, response, *args):
        # Bind the deserializer to the 'ErrorResponse' model type.
        super(ErrorResponseException, self).__init__(
            deserialize, response, 'ErrorResponse', *args)
class GeoFilter(Model):
    """Rules defining user's geo access within a CDN endpoint.

    All required parameters must be populated in order to send to Azure.

    :param relative_path: Required. Relative path applicable to geo filter,
     e.g. '/mypictures', '/mypicture/kitty.jpg'.
    :type relative_path: str
    :param action: Required. Action of the geo filter, i.e. allow or block
     access. Possible values include: 'Block', 'Allow'
    :type action: str or ~azure.mgmt.cdn.models.GeoFilterActions
    :param country_codes: Required. Two letter country codes defining user
     country access in a geo filter, e.g. AU, MX, US.
    :type country_codes: list[str]
    """

    # Every attribute is required on the wire.
    _validation = {
        'relative_path': {'required': True},
        'action': {'required': True},
        'country_codes': {'required': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'relative_path': {'key': 'relativePath', 'type': 'str'},
        'action': {'key': 'action', 'type': 'GeoFilterActions'},
        'country_codes': {'key': 'countryCodes', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(GeoFilter, self).__init__(**kwargs)
        # Absent keys default to None (validation happens at send time).
        for name in ('relative_path', 'action', 'country_codes'):
            setattr(self, name, kwargs.get(name))
class HeaderActionParameters(Model):
    """Defines the parameters for the request header action.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleHeaderActionParameters" .
    :vartype odatatype: str
    :param header_action: Required. Action to perform. Possible values
     include: 'Append', 'Overwrite', 'Delete'
    :type header_action: str or ~azure.mgmt.cdn.models.HeaderAction
    :param header_name: Required. Name of the header to modify
    :type header_name: str
    :param value: Value for the specified action
    :type value: str
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'header_action': {'required': True},
        'header_name': {'required': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'header_action': {'key': 'headerAction', 'type': 'str'},
        'header_name': {'key': 'headerName', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    # Constant discriminator identifying this parameter type on the wire.
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleHeaderActionParameters"

    def __init__(self, **kwargs):
        super(HeaderActionParameters, self).__init__(**kwargs)
        # Absent keys default to None.
        for name in ('header_action', 'header_name', 'value'):
            setattr(self, name, kwargs.get(name))
class HttpVersionMatchConditionParameters(Model):
    """Defines the parameters for HttpVersion match conditions.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleHttpVersionConditionParameters" .
    :vartype odatatype: str
    :ivar operator: Required. Describes operator to be matched. Default
     value: "Equal" .
    :vartype operator: str
    :param negate_condition: Describes if this is negate condition or not
    :type negate_condition: bool
    :param match_values: Required. The match value for the condition of the
     delivery rule
    :type match_values: list[str]
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True, 'constant': True},
        'match_values': {'required': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
    }

    # Constant discriminator and fixed operator for this condition type.
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleHttpVersionConditionParameters"
    operator = "Equal"

    def __init__(self, **kwargs):
        super(HttpVersionMatchConditionParameters, self).__init__(**kwargs)
        # Absent keys default to None.
        self.negate_condition = kwargs.get('negate_condition')
        self.match_values = kwargs.get('match_values')
class IpAddressGroup(Model):
    """CDN Ip address group.

    :param delivery_region: The delivery region of the ip address group
    :type delivery_region: str
    :param ipv4_addresses: The list of ip v4 addresses.
    :type ipv4_addresses: list[~azure.mgmt.cdn.models.CidrIpAddress]
    :param ipv6_addresses: The list of ip v6 addresses.
    :type ipv6_addresses: list[~azure.mgmt.cdn.models.CidrIpAddress]
    """

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'delivery_region': {'key': 'deliveryRegion', 'type': 'str'},
        'ipv4_addresses': {'key': 'ipv4Addresses', 'type': '[CidrIpAddress]'},
        'ipv6_addresses': {'key': 'ipv6Addresses', 'type': '[CidrIpAddress]'},
    }

    def __init__(self, **kwargs):
        super(IpAddressGroup, self).__init__(**kwargs)
        # All attributes optional; absent keys default to None.
        for name in ('delivery_region', 'ipv4_addresses', 'ipv6_addresses'):
            setattr(self, name, kwargs.get(name))
class IsDeviceMatchConditionParameters(Model):
    """Defines the parameters for IsDevice match conditions.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleIsDeviceConditionParameters" .
    :vartype odatatype: str
    :ivar operator: Required. Describes operator to be matched. Default
     value: "Equal" .
    :vartype operator: str
    :param negate_condition: Describes if this is negate condition or not
    :type negate_condition: bool
    :param match_values: Required. The match value for the condition of the
     delivery rule
    :type match_values: list[str]
    :param transforms: List of transforms
    :type transforms: list[str or ~azure.mgmt.cdn.models.Transform]
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True, 'constant': True},
        'match_values': {'required': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    # Constant discriminator and fixed operator for this condition type.
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleIsDeviceConditionParameters"
    operator = "Equal"

    def __init__(self, **kwargs):
        super(IsDeviceMatchConditionParameters, self).__init__(**kwargs)
        # Absent keys default to None.
        for name in ('negate_condition', 'match_values', 'transforms'):
            setattr(self, name, kwargs.get(name))
class KeyVaultCertificateSourceParameters(Model):
    """Describes the parameters for using a user's KeyVault certificate for
    securing custom domain.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.KeyVaultCertificateSourceParameters" .
    :vartype odatatype: str
    :param subscription_id: Required. Subscription Id of the user's Key
     Vault containing the SSL certificate
    :type subscription_id: str
    :param resource_group_name: Required. Resource group of the user's Key
     Vault containing the SSL certificate
    :type resource_group_name: str
    :param vault_name: Required. The name of the user's Key Vault containing
     the SSL certificate
    :type vault_name: str
    :param secret_name: Required. The name of Key Vault Secret (representing
     the full certificate PFX) in Key Vault.
    :type secret_name: str
    :param secret_version: Required. The version(GUID) of Key Vault Secret
     in Key Vault.
    :type secret_version: str
    :ivar update_rule: Required. Action taken when the certificate is
     updated in Key Vault. Default value: "NoAction" .
    :vartype update_rule: str
    :ivar delete_rule: Required. Action taken when the certificate is
     removed from Key Vault. Default value: "NoAction" .
    :vartype delete_rule: str
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'subscription_id': {'required': True},
        'resource_group_name': {'required': True},
        'vault_name': {'required': True},
        'secret_name': {'required': True},
        'secret_version': {'required': True},
        'update_rule': {'required': True, 'constant': True},
        'delete_rule': {'required': True, 'constant': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
        'resource_group_name': {'key': 'resourceGroupName', 'type': 'str'},
        'vault_name': {'key': 'vaultName', 'type': 'str'},
        'secret_name': {'key': 'secretName', 'type': 'str'},
        'secret_version': {'key': 'secretVersion', 'type': 'str'},
        'update_rule': {'key': 'updateRule', 'type': 'str'},
        'delete_rule': {'key': 'deleteRule', 'type': 'str'},
    }

    # Constant values fixed by the service contract.
    odatatype = "#Microsoft.Azure.Cdn.Models.KeyVaultCertificateSourceParameters"
    update_rule = "NoAction"
    delete_rule = "NoAction"

    def __init__(self, **kwargs):
        super(KeyVaultCertificateSourceParameters, self).__init__(**kwargs)
        # Absent keys default to None (validation happens at send time).
        for name in ('subscription_id', 'resource_group_name', 'vault_name',
                     'secret_name', 'secret_version'):
            setattr(self, name, kwargs.get(name))
class LoadParameters(Model):
    """Parameters required for content load.

    All required parameters must be populated in order to send to Azure.

    :param content_paths: Required. The path to the content to be loaded.
     Path should be a relative file URL of the origin.
    :type content_paths: list[str]
    """

    _validation = {
        'content_paths': {'required': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'content_paths': {'key': 'contentPaths', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(LoadParameters, self).__init__(**kwargs)
        # Absent key defaults to None (required check happens at send time).
        self.content_paths = kwargs.get('content_paths')
class Operation(Model):
    """CDN REST API operation.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name: Operation name: {provider}/{resource}/{operation}
    :vartype name: str
    :param display: The object that represents the operation.
    :type display: ~azure.mgmt.cdn.models.OperationDisplay
    """

    _validation = {
        'name': {'readonly': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
    }

    def __init__(self, **kwargs):
        super(Operation, self).__init__(**kwargs)
        self.name = None  # read-only; populated by the service
        self.display = kwargs.get('display')
class OperationDisplay(Model):
    """The object that represents the operation.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar provider: Service provider: Microsoft.Cdn
    :vartype provider: str
    :ivar resource: Resource on which the operation is performed: Profile,
     endpoint, etc.
    :vartype resource: str
    :ivar operation: Operation type: Read, write, delete, etc.
    :vartype operation: str
    """

    # Every field is read-only on the wire.
    _validation = {
        'provider': {'readonly': True},
        'resource': {'readonly': True},
        'operation': {'readonly': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(OperationDisplay, self).__init__(**kwargs)
        # Read-only fields; the service fills these in on deserialization.
        self.provider = None
        self.resource = None
        self.operation = None
class Origin(TrackedResource):
    """CDN origin is the source of the content being delivered via CDN. When
    the edge nodes represented by an endpoint do not have the requested
    content cached, they attempt to fetch it from one or more of the
    configured origins.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param host_name: Required. The address of the origin. Domain names,
     IPv4 addresses, and IPv6 addresses are supported.
    :type host_name: str
    :param http_port: The value of the HTTP port. Must be between 1 and
     65535.
    :type http_port: int
    :param https_port: The value of the https port. Must be between 1 and
     65535.
    :type https_port: int
    :ivar resource_state: Resource status of the origin. Possible values
     include: 'Creating', 'Active', 'Deleting'
    :vartype resource_state: str or
     ~azure.mgmt.cdn.models.OriginResourceState
    :ivar provisioning_state: Provisioning status of the origin.
    :vartype provisioning_state: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'host_name': {'required': True},
        'http_port': {'maximum': 65535, 'minimum': 1},
        'https_port': {'maximum': 65535, 'minimum': 1},
        'resource_state': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'http_port': {'key': 'properties.httpPort', 'type': 'int'},
        'https_port': {'key': 'properties.httpsPort', 'type': 'int'},
        'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Origin, self).__init__(**kwargs)
        # Caller-settable properties; absent keys default to None.
        for attr in ('host_name', 'http_port', 'https_port'):
            setattr(self, attr, kwargs.get(attr))
        self.resource_state = None  # read-only; populated by the service
        self.provisioning_state = None  # read-only; populated by the service
class OriginUpdateParameters(Model):
    """Origin properties needed for origin creation or update.

    :param host_name: The address of the origin. Domain names, IPv4
     addresses, and IPv6 addresses are supported.
    :type host_name: str
    :param http_port: The value of the HTTP port. Must be between 1 and
     65535.
    :type http_port: int
    :param https_port: The value of the HTTPS port. Must be between 1 and
     65535.
    :type https_port: int
    """

    # Port numbers must be in the valid TCP range.
    _validation = {
        'http_port': {'maximum': 65535, 'minimum': 1},
        'https_port': {'maximum': 65535, 'minimum': 1},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'http_port': {'key': 'properties.httpPort', 'type': 'int'},
        'https_port': {'key': 'properties.httpsPort', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(OriginUpdateParameters, self).__init__(**kwargs)
        # All attributes optional; absent keys default to None.
        for name in ('host_name', 'http_port', 'https_port'):
            setattr(self, name, kwargs.get(name))
class PostArgsMatchConditionParameters(Model):
    """Defines the parameters for PostArgs match conditions.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRulePostArgsConditionParameters" .
    :vartype odatatype: str
    :param selector: Required. Name of PostArg to be matched
    :type selector: str
    :param operator: Required. Describes operator to be matched. Possible
     values include: 'Any', 'Equal', 'Contains', 'BeginsWith', 'EndsWith',
     'LessThan', 'LessThanOrEqual', 'GreaterThan', 'GreaterThanOrEqual'
    :type operator: str or ~azure.mgmt.cdn.models.PostArgsOperator
    :param negate_condition: Describes if this is negate condition or not
    :type negate_condition: bool
    :param match_values: Required. The match value for the condition of the
     delivery rule
    :type match_values: list[str]
    :param transforms: List of transforms
    :type transforms: list[str or ~azure.mgmt.cdn.models.Transform]
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'selector': {'required': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'selector': {'key': 'selector', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    # Constant discriminator identifying this condition type on the wire.
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRulePostArgsConditionParameters"

    def __init__(self, **kwargs):
        super(PostArgsMatchConditionParameters, self).__init__(**kwargs)
        # Absent keys default to None.
        for name in ('selector', 'operator', 'negate_condition',
                     'match_values', 'transforms'):
            setattr(self, name, kwargs.get(name))
class Profile(TrackedResource):
    """CDN profile is a logical grouping of endpoints that share the same
    settings, such as CDN provider and pricing tier.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Required. Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param sku: Required. The pricing tier (defines a CDN provider, feature
     list and rate) of the CDN profile.
    :type sku: ~azure.mgmt.cdn.models.Sku
    :ivar resource_state: Resource status of the profile. Possible values
     include: 'Creating', 'Active', 'Deleting', 'Disabled'
    :vartype resource_state: str or
     ~azure.mgmt.cdn.models.ProfileResourceState
    :ivar provisioning_state: Provisioning status of the profile.
    :vartype provisioning_state: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'sku': {'required': True},
        'resource_state': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Profile, self).__init__(**kwargs)
        self.sku = kwargs.get('sku')
        self.resource_state = None  # read-only; populated by the service
        self.provisioning_state = None  # read-only; populated by the service
class ProfileUpdateParameters(Model):
    """Properties required to update a profile.

    :param tags: Profile tags
    :type tags: dict[str, str]
    """

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super(ProfileUpdateParameters, self).__init__(**kwargs)
        # Absent key defaults to None.
        self.tags = kwargs.get('tags')
class PurgeParameters(Model):
    """Parameters required for content purge.

    All required parameters must be populated in order to send to Azure.

    :param content_paths: Required. The path to the content to be purged.
     Can describe a file path or a wild card directory.
    :type content_paths: list[str]
    """

    _validation = {
        'content_paths': {'required': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'content_paths': {'key': 'contentPaths', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(PurgeParameters, self).__init__(**kwargs)
        # Absent key defaults to None (required check happens at send time).
        self.content_paths = kwargs.get('content_paths')
class QueryStringMatchConditionParameters(Model):
    """Defines the parameters for QueryString match conditions.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleQueryStringConditionParameters" .
    :vartype odatatype: str
    :param operator: Required. Describes operator to be matched. Possible
     values include: 'Any', 'Equal', 'Contains', 'BeginsWith', 'EndsWith',
     'LessThan', 'LessThanOrEqual', 'GreaterThan', 'GreaterThanOrEqual'
    :type operator: str or ~azure.mgmt.cdn.models.QueryStringOperator
    :param negate_condition: Describes if this is negate condition or not
    :type negate_condition: bool
    :param match_values: Required. The match value for the condition of the
     delivery rule
    :type match_values: list[str]
    :param transforms: List of transforms
    :type transforms: list[str or ~azure.mgmt.cdn.models.Transform]
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    # Constant discriminator identifying this condition type on the wire.
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleQueryStringConditionParameters"

    def __init__(self, **kwargs):
        super(QueryStringMatchConditionParameters, self).__init__(**kwargs)
        # Absent keys default to None.
        for name in ('operator', 'negate_condition', 'match_values',
                     'transforms'):
            setattr(self, name, kwargs.get(name))
class RemoteAddressMatchConditionParameters(Model):
    """Defines the parameters for RemoteAddress match conditions.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleRemoteAddressConditionParameters" .
    :vartype odatatype: str
    :param operator: Required. Describes operator to be matched. Possible
     values include: 'Any', 'IPMatch', 'GeoMatch'
    :type operator: str or ~azure.mgmt.cdn.models.RemoteAddressOperator
    :param negate_condition: Describes if this is negate condition or not
    :type negate_condition: bool
    :param match_values: Required. Match values to match against. The
     operator applies to each value with OR semantics: if any of them match
     the variable with the given operator this condition is a match.
    :type match_values: list[str]
    :param transforms: List of transforms
    :type transforms: list[str or ~azure.mgmt.cdn.models.Transform]
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    # Constant discriminator identifying this condition type on the wire.
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleRemoteAddressConditionParameters"

    def __init__(self, **kwargs):
        super(RemoteAddressMatchConditionParameters, self).__init__(**kwargs)
        # Absent keys default to None.
        for name in ('operator', 'negate_condition', 'match_values',
                     'transforms'):
            setattr(self, name, kwargs.get(name))
class RequestBodyMatchConditionParameters(Model):
    """Defines the parameters for RequestBody match conditions.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleRequestBodyConditionParameters" .
    :vartype odatatype: str
    :param operator: Required. Describes operator to be matched. Possible
     values include: 'Any', 'Equal', 'Contains', 'BeginsWith', 'EndsWith',
     'LessThan', 'LessThanOrEqual', 'GreaterThan', 'GreaterThanOrEqual'
    :type operator: str or ~azure.mgmt.cdn.models.RequestBodyOperator
    :param negate_condition: Describes if this is negate condition or not
    :type negate_condition: bool
    :param match_values: Required. The match value for the condition of the
     delivery rule
    :type match_values: list[str]
    :param transforms: List of transforms
    :type transforms: list[str or ~azure.mgmt.cdn.models.Transform]
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }

    # Attribute name -> wire key / serialization type.
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    # Constant discriminator identifying this condition type on the wire.
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleRequestBodyConditionParameters"

    def __init__(self, **kwargs):
        super(RequestBodyMatchConditionParameters, self).__init__(**kwargs)
        # Absent keys default to None.
        for name in ('operator', 'negate_condition', 'match_values',
                     'transforms'):
            setattr(self, name, kwargs.get(name))
class RequestHeaderMatchConditionParameters(Model):
    """Parameter set for a RequestHeader delivery-rule match condition.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order
    to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleRequestHeaderConditionParameters"
     .
    :vartype odatatype: str
    :param selector: Required. Name of the header to match against.
    :type selector: str
    :param operator: Required. Operator applied when matching. Possible
     values include: 'Any', 'Equal', 'Contains', 'BeginsWith', 'EndsWith',
     'LessThan', 'LessThanOrEqual', 'GreaterThan', 'GreaterThanOrEqual'
    :type operator: str or ~azure.mgmt.cdn.models.RequestHeaderOperator
    :param negate_condition: Whether the condition result is negated.
    :type negate_condition: bool
    :param match_values: Required. Values the header is compared against.
    :type match_values: list[str]
    :param transforms: Transforms applied before the comparison.
    :type transforms: list[str or ~azure.mgmt.cdn.models.Transform]
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'selector': {'required': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'selector': {'key': 'selector', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleRequestHeaderConditionParameters"

    def __init__(self, **kwargs):
        super(RequestHeaderMatchConditionParameters, self).__init__(**kwargs)
        # Copy the optional keyword arguments onto the instance; a missing
        # key simply leaves the attribute as None.
        for _field in ('selector', 'operator', 'negate_condition',
                       'match_values', 'transforms'):
            setattr(self, _field, kwargs.get(_field))
class RequestMethodMatchConditionParameters(Model):
    """Parameter set for a RequestMethod delivery-rule match condition.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order
    to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleRequestMethodConditionParameters"
     .
    :vartype odatatype: str
    :ivar operator: Required. Operator applied when matching. Default
     value: "Equal" .
    :vartype operator: str
    :param negate_condition: Whether the condition result is negated.
    :type negate_condition: bool
    :param match_values: Required. Values the request method is compared
     against.
    :type match_values: list[str]
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True, 'constant': True},
        'match_values': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleRequestMethodConditionParameters"

    # The only operator the service accepts for request-method matching.
    operator = "Equal"

    def __init__(self, **kwargs):
        super(RequestMethodMatchConditionParameters, self).__init__(**kwargs)
        # Copy the optional keyword arguments onto the instance; a missing
        # key simply leaves the attribute as None.
        for _field in ('negate_condition', 'match_values'):
            setattr(self, _field, kwargs.get(_field))
class RequestSchemeMatchConditionParameters(Model):
    """Parameter set for a RequestScheme delivery-rule match condition.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order
    to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleRequestSchemeConditionParameters"
     .
    :vartype odatatype: str
    :ivar operator: Required. Operator applied when matching. Default
     value: "Equal" .
    :vartype operator: str
    :param negate_condition: Whether the condition result is negated.
    :type negate_condition: bool
    :param match_values: Required. Values the request scheme is compared
     against.
    :type match_values: list[str]
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True, 'constant': True},
        'match_values': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleRequestSchemeConditionParameters"

    # The only operator the service accepts for request-scheme matching.
    operator = "Equal"

    def __init__(self, **kwargs):
        super(RequestSchemeMatchConditionParameters, self).__init__(**kwargs)
        # Copy the optional keyword arguments onto the instance; a missing
        # key simply leaves the attribute as None.
        for _field in ('negate_condition', 'match_values'):
            setattr(self, _field, kwargs.get(_field))
class RequestUriMatchConditionParameters(Model):
    """Parameter set for a RequestUri delivery-rule match condition.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order
    to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleRequestUriConditionParameters" .
    :vartype odatatype: str
    :param operator: Required. Operator applied when matching. Possible
     values include: 'Any', 'Equal', 'Contains', 'BeginsWith', 'EndsWith',
     'LessThan', 'LessThanOrEqual', 'GreaterThan', 'GreaterThanOrEqual'
    :type operator: str or ~azure.mgmt.cdn.models.RequestUriOperator
    :param negate_condition: Whether the condition result is negated.
    :type negate_condition: bool
    :param match_values: Required. Values the request URI is compared
     against.
    :type match_values: list[str]
    :param transforms: Transforms applied before the comparison.
    :type transforms: list[str or ~azure.mgmt.cdn.models.Transform]
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleRequestUriConditionParameters"

    def __init__(self, **kwargs):
        super(RequestUriMatchConditionParameters, self).__init__(**kwargs)
        # Copy the optional keyword arguments onto the instance; a missing
        # key simply leaves the attribute as None.
        for _field in ('operator', 'negate_condition', 'match_values',
                       'transforms'):
            setattr(self, _field, kwargs.get(_field))
class ResourceUsage(Model):
    """Output of the check-resource-usage API.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar resource_type: Resource type the usage figures refer to.
    :vartype resource_type: str
    :ivar unit: Unit of the usage, e.g. Count.
    :vartype unit: str
    :ivar current_value: Current usage of the resource type.
    :vartype current_value: int
    :ivar limit: Quota for the resource type.
    :vartype limit: int
    """

    _validation = {
        'resource_type': {'readonly': True},
        'unit': {'readonly': True},
        'current_value': {'readonly': True},
        'limit': {'readonly': True},
    }

    _attribute_map = {
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'current_value': {'key': 'currentValue', 'type': 'int'},
        'limit': {'key': 'limit', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(ResourceUsage, self).__init__(**kwargs)
        # All attributes are read-only and server-populated; they start
        # out as None on the client.
        for _attr in ('resource_type', 'unit', 'current_value', 'limit'):
            setattr(self, _attr, None)
class Sku(Model):
    """Pricing tier of a CDN profile (selects a CDN provider, feature list
    and rate).

    :param name: Name of the pricing tier. Possible values include:
     'Standard_Verizon', 'Premium_Verizon', 'Custom_Verizon',
     'Standard_Akamai', 'Standard_ChinaCdn', 'Standard_Microsoft',
     'Premium_ChinaCdn'
    :type name: str or ~azure.mgmt.cdn.models.SkuName
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Sku, self).__init__(**kwargs)
        # Missing keyword defaults the tier name to None.
        self.name = kwargs.get('name')
class SsoUri(Model):
    """URI required to sign in to the supplemental portal from the Azure
    portal.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar sso_uri_value: The URI used to login to the supplemental portal.
    :vartype sso_uri_value: str
    """

    _validation = {
        'sso_uri_value': {'readonly': True},
    }

    _attribute_map = {
        'sso_uri_value': {'key': 'ssoUriValue', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(SsoUri, self).__init__(**kwargs)
        # Read-only, server-populated; starts out as None on the client.
        self.sso_uri_value = None
class SupportedOptimizationTypesListResult(Model):
    """Result of the GetSupportedOptimizationTypes API.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar supported_optimization_types: Optimization types supported by a
     profile.
    :vartype supported_optimization_types: list[str or
     ~azure.mgmt.cdn.models.OptimizationType]
    """

    _validation = {
        'supported_optimization_types': {'readonly': True},
    }

    _attribute_map = {
        'supported_optimization_types': {'key': 'supportedOptimizationTypes', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(SupportedOptimizationTypesListResult, self).__init__(**kwargs)
        # Read-only, server-populated; starts out as None on the client.
        self.supported_optimization_types = None
class UrlFileExtensionMatchConditionParameters(Model):
    """Parameter set for a UrlFileExtension delivery-rule match condition.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order
    to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlFileExtensionMatchConditionParameters"
     .
    :vartype odatatype: str
    :param operator: Required. Operator applied when matching. Possible
     values include: 'Any', 'Equal', 'Contains', 'BeginsWith', 'EndsWith',
     'LessThan', 'LessThanOrEqual', 'GreaterThan', 'GreaterThanOrEqual'
    :type operator: str or ~azure.mgmt.cdn.models.UrlFileExtensionOperator
    :param negate_condition: Whether the condition result is negated.
    :type negate_condition: bool
    :param match_values: Required. Values the URL file extension is
     compared against.
    :type match_values: list[str]
    :param transforms: Transforms applied before the comparison.
    :type transforms: list[str or ~azure.mgmt.cdn.models.Transform]
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlFileExtensionMatchConditionParameters"

    def __init__(self, **kwargs):
        super(UrlFileExtensionMatchConditionParameters, self).__init__(**kwargs)
        # Copy the optional keyword arguments onto the instance; a missing
        # key simply leaves the attribute as None.
        for _field in ('operator', 'negate_condition', 'match_values',
                       'transforms'):
            setattr(self, _field, kwargs.get(_field))
class UrlFileNameMatchConditionParameters(Model):
    """Parameter set for a UrlFilename delivery-rule match condition.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order
    to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlFilenameConditionParameters" .
    :vartype odatatype: str
    :param operator: Required. Operator applied when matching. Possible
     values include: 'Any', 'Equal', 'Contains', 'BeginsWith', 'EndsWith',
     'LessThan', 'LessThanOrEqual', 'GreaterThan', 'GreaterThanOrEqual'
    :type operator: str or ~azure.mgmt.cdn.models.UrlFileNameOperator
    :param negate_condition: Whether the condition result is negated.
    :type negate_condition: bool
    :param match_values: Required. Values the URL file name is compared
     against.
    :type match_values: list[str]
    :param transforms: Transforms applied before the comparison.
    :type transforms: list[str or ~azure.mgmt.cdn.models.Transform]
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlFilenameConditionParameters"

    def __init__(self, **kwargs):
        super(UrlFileNameMatchConditionParameters, self).__init__(**kwargs)
        # Copy the optional keyword arguments onto the instance; a missing
        # key simply leaves the attribute as None.
        for _field in ('operator', 'negate_condition', 'match_values',
                       'transforms'):
            setattr(self, _field, kwargs.get(_field))
class UrlPathMatchConditionParameters(Model):
    """Parameter set for a UrlPath delivery-rule match condition.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order
    to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlPathMatchConditionParameters"
     .
    :vartype odatatype: str
    :param operator: Required. Operator applied when matching. Possible
     values include: 'Any', 'Equal', 'Contains', 'BeginsWith', 'EndsWith',
     'LessThan', 'LessThanOrEqual', 'GreaterThan', 'GreaterThanOrEqual',
     'Wildcard'
    :type operator: str or ~azure.mgmt.cdn.models.UrlPathOperator
    :param negate_condition: Whether the condition result is negated.
    :type negate_condition: bool
    :param match_values: Required. Values the URL path is compared against.
    :type match_values: list[str]
    :param transforms: Transforms applied before the comparison.
    :type transforms: list[str or ~azure.mgmt.cdn.models.Transform]
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlPathMatchConditionParameters"

    def __init__(self, **kwargs):
        super(UrlPathMatchConditionParameters, self).__init__(**kwargs)
        # Copy the optional keyword arguments onto the instance; a missing
        # key simply leaves the attribute as None.
        for _field in ('operator', 'negate_condition', 'match_values',
                       'transforms'):
            setattr(self, _field, kwargs.get(_field))
class UrlRedirectAction(DeliveryRuleAction):
    """URL-redirect action of a delivery rule.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Parameters controlling the redirect.
    :type parameters: ~azure.mgmt.cdn.models.UrlRedirectActionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'UrlRedirectActionParameters'},
    }

    def __init__(self, **kwargs):
        super(UrlRedirectAction, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Polymorphic discriminator identifying this action type.
        self.name = 'UrlRedirect'
class UrlRedirectActionParameters(Model):
    """Parameters for the URL-redirect action.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order
    to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlRedirectActionParameters" .
    :vartype odatatype: str
    :param redirect_type: Required. Redirect type used when redirecting
     traffic. Possible values include: 'Moved', 'Found',
     'TemporaryRedirect', 'PermanentRedirect'
    :type redirect_type: str or ~azure.mgmt.cdn.models.RedirectType
    :param destination_protocol: Protocol used for the redirect; defaults
     to MatchRequest. Possible values include: 'MatchRequest', 'Http',
     'Https'
    :type destination_protocol: str or
     ~azure.mgmt.cdn.models.DestinationProtocol
    :param custom_path: Full path to redirect to. Cannot be empty and must
     start with /. Leave empty to reuse the incoming path.
    :type custom_path: str
    :param custom_hostname: Host to redirect to. Leave empty to reuse the
     incoming host.
    :type custom_hostname: str
    :param custom_query_string: Query string placed in the redirect URL,
     replacing any existing one; leave empty to preserve the incoming query
     string. Must be in <key>=<value> format. ? and & are added
     automatically, so do not include them.
    :type custom_query_string: str
    :param custom_fragment: Fragment appended to the redirect URL (the part
     after #). Do not include the #.
    :type custom_fragment: str
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'redirect_type': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'redirect_type': {'key': 'redirectType', 'type': 'str'},
        'destination_protocol': {'key': 'destinationProtocol', 'type': 'str'},
        'custom_path': {'key': 'customPath', 'type': 'str'},
        'custom_hostname': {'key': 'customHostname', 'type': 'str'},
        'custom_query_string': {'key': 'customQueryString', 'type': 'str'},
        'custom_fragment': {'key': 'customFragment', 'type': 'str'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlRedirectActionParameters"

    def __init__(self, **kwargs):
        super(UrlRedirectActionParameters, self).__init__(**kwargs)
        # Copy the optional keyword arguments onto the instance; a missing
        # key simply leaves the attribute as None.
        for _field in ('redirect_type', 'destination_protocol', 'custom_path',
                       'custom_hostname', 'custom_query_string',
                       'custom_fragment'):
            setattr(self, _field, kwargs.get(_field))
class UrlRewriteAction(DeliveryRuleAction):
    """URL-rewrite action of a delivery rule.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. Constant filled by server.
    :type name: str
    :param parameters: Required. Parameters controlling the rewrite.
    :type parameters: ~azure.mgmt.cdn.models.UrlRewriteActionParameters
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'UrlRewriteActionParameters'},
    }

    def __init__(self, **kwargs):
        super(UrlRewriteAction, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        # Polymorphic discriminator identifying this action type.
        self.name = 'UrlRewrite'
class UrlRewriteActionParameters(Model):
    """Parameters for the URL-rewrite action.

    Variables are only populated by the server, and will be ignored when
    sending a request. All required parameters must be populated in order
    to send to Azure.

    :ivar odatatype: Required. Default value:
     "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlRewriteActionParameters" .
    :vartype odatatype: str
    :param source_pattern: Required. Request URI pattern identifying the
     requests that may be rewritten. Matching is prefix-based: use "/" to
     match all URL paths, or the origin path field to match only the root
     directory.
    :type source_pattern: str
    :param destination: Required. Destination path used in the rewrite;
     replaces the source pattern.
    :type destination: str
    :param preserve_unmatched_path: If True, the remainder of the path
     after the source pattern is appended to the new destination path.
    :type preserve_unmatched_path: bool
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'source_pattern': {'required': True},
        'destination': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'source_pattern': {'key': 'sourcePattern', 'type': 'str'},
        'destination': {'key': 'destination', 'type': 'str'},
        'preserve_unmatched_path': {'key': 'preserveUnmatchedPath', 'type': 'bool'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlRewriteActionParameters"

    def __init__(self, **kwargs):
        super(UrlRewriteActionParameters, self).__init__(**kwargs)
        # Copy the optional keyword arguments onto the instance; a missing
        # key simply leaves the attribute as None.
        for _field in ('source_pattern', 'destination',
                       'preserve_unmatched_path'):
            setattr(self, _field, kwargs.get(_field))
class UserManagedHttpsParameters(CustomDomainHttpsParameters):
    """Certificate-source parameters that enable SSL using the user's own
    Key Vault certificate.

    All required parameters must be populated in order to send to Azure.

    :param protocol_type: Required. TLS extension protocol used for secure
     delivery. Possible values include: 'ServerNameIndication', 'IPBased'
    :type protocol_type: str or ~azure.mgmt.cdn.models.ProtocolType
    :param minimum_tls_version: TLS protocol version used for Https.
     Possible values include: 'None', 'TLS10', 'TLS12'
    :type minimum_tls_version: str or ~azure.mgmt.cdn.models.MinimumTlsVersion
    :param certificate_source: Required. Constant filled by server.
    :type certificate_source: str
    :param certificate_source_parameters: Required. Key Vault certificate
     details used for enabling SSL.
    :type certificate_source_parameters:
     ~azure.mgmt.cdn.models.KeyVaultCertificateSourceParameters
    """

    _validation = {
        'protocol_type': {'required': True},
        'certificate_source': {'required': True},
        'certificate_source_parameters': {'required': True},
    }

    _attribute_map = {
        'protocol_type': {'key': 'protocolType', 'type': 'str'},
        'minimum_tls_version': {'key': 'minimumTlsVersion', 'type': 'MinimumTlsVersion'},
        'certificate_source': {'key': 'certificateSource', 'type': 'str'},
        'certificate_source_parameters': {'key': 'certificateSourceParameters', 'type': 'KeyVaultCertificateSourceParameters'},
    }

    def __init__(self, **kwargs):
        super(UserManagedHttpsParameters, self).__init__(**kwargs)
        self.certificate_source_parameters = kwargs.get(
            'certificate_source_parameters')
        # Polymorphic discriminator: this subclass always uses Key Vault.
        self.certificate_source = 'AzureKeyVault'
class ValidateCustomDomainInput(Model):
    """Custom domain to be validated for DNS mapping.

    All required parameters must be populated in order to send to Azure.

    :param host_name: Required. Host name of the custom domain; must be a
     domain name.
    :type host_name: str
    """

    _validation = {
        'host_name': {'required': True},
    }

    _attribute_map = {
        'host_name': {'key': 'hostName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ValidateCustomDomainInput, self).__init__(**kwargs)
        # Missing keyword defaults the host name to None.
        self.host_name = kwargs.get('host_name')
class ValidateCustomDomainOutput(Model):
    """Result of a custom domain validation.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar custom_domain_validated: Whether the custom domain is valid.
    :vartype custom_domain_validated: bool
    :ivar reason: Reason the custom domain is not valid.
    :vartype reason: str
    :ivar message: Error message describing why the custom domain is not
     valid.
    :vartype message: str
    """

    _validation = {
        'custom_domain_validated': {'readonly': True},
        'reason': {'readonly': True},
        'message': {'readonly': True},
    }

    _attribute_map = {
        'custom_domain_validated': {'key': 'customDomainValidated', 'type': 'bool'},
        'reason': {'key': 'reason', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ValidateCustomDomainOutput, self).__init__(**kwargs)
        # All attributes are read-only and server-populated; they start
        # out as None on the client.
        for _attr in ('custom_domain_validated', 'reason', 'message'):
            setattr(self, _attr, None)
class ValidateProbeInput(Model):
    """Input of the validate-probe API.

    All required parameters must be populated in order to send to Azure.

    :param probe_url: Required. The probe URL to validate.
    :type probe_url: str
    """

    _validation = {
        'probe_url': {'required': True},
    }

    _attribute_map = {
        'probe_url': {'key': 'probeURL', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ValidateProbeInput, self).__init__(**kwargs)
        # Missing keyword defaults the probe URL to None.
        self.probe_url = kwargs.get('probe_url')
class ValidateProbeOutput(Model):
    """Result of the validate-probe API.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar is_valid: Whether the probe URL is accepted.
    :vartype is_valid: bool
    :ivar error_code: Error code set when the probe URL is not accepted.
    :vartype error_code: str
    :ivar message: Detailed error message describing why the probe URL is
     not accepted.
    :vartype message: str
    """

    _validation = {
        'is_valid': {'readonly': True},
        'error_code': {'readonly': True},
        'message': {'readonly': True},
    }

    _attribute_map = {
        'is_valid': {'key': 'isValid', 'type': 'bool'},
        'error_code': {'key': 'errorCode', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ValidateProbeOutput, self).__init__(**kwargs)
        # All attributes are read-only and server-populated; they start
        # out as None on the client.
        for _attr in ('is_valid', 'error_code', 'message'):
            setattr(self, _attr, None)
| 38.250082 | 726 | 0.667036 |
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class CacheExpirationActionParameters(Model):
    """Parameters for the cache-expiration action of a delivery rule.

    :param cache_behavior: Required. Caching behavior for the requests.
    :type cache_behavior: str
    :param cache_duration: Duration for which content is cached; allowed
     format is [d.]hh:mm:ss.
    :type cache_duration: str
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'cache_behavior': {'required': True},
        'cache_type': {'required': True, 'constant': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'cache_behavior': {'key': 'cacheBehavior', 'type': 'str'},
        'cache_type': {'key': 'cacheType', 'type': 'str'},
        'cache_duration': {'key': 'cacheDuration', 'type': 'str'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleCacheExpirationActionParameters"

    # Constant accepted by the service for this action.
    cache_type = "All"

    def __init__(self, **kwargs):
        super(CacheExpirationActionParameters, self).__init__(**kwargs)
        # Copy the optional keyword arguments onto the instance; a missing
        # key simply leaves the attribute as None.
        for _field in ('cache_behavior', 'cache_duration'):
            setattr(self, _field, kwargs.get(_field))
class CacheKeyQueryStringActionParameters(Model):
    """Parameters for the cache-key query-string action of a delivery rule.

    :param query_string_behavior: Required. Caching behavior for the query
     strings of a request.
    :type query_string_behavior: str
    :param query_parameters: Query parameters to include or exclude
     (comma separated).
    :type query_parameters: str
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'query_string_behavior': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'query_string_behavior': {'key': 'queryStringBehavior', 'type': 'str'},
        'query_parameters': {'key': 'queryParameters', 'type': 'str'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleCacheKeyQueryStringBehaviorActionParameters"

    def __init__(self, **kwargs):
        super(CacheKeyQueryStringActionParameters, self).__init__(**kwargs)
        # Copy the optional keyword arguments onto the instance; a missing
        # key simply leaves the attribute as None.
        for _field in ('query_string_behavior', 'query_parameters'):
            setattr(self, _field, kwargs.get(_field))
class CdnCertificateSourceParameters(Model):
    """Parameters for a CDN-managed certificate source.

    :param certificate_type: Required. Type of certificate used.
    :type certificate_type: str
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'certificate_type': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'certificate_type': {'key': 'certificateType', 'type': 'str'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.CdnCertificateSourceParameters"

    def __init__(self, **kwargs):
        super(CdnCertificateSourceParameters, self).__init__(**kwargs)
        # Missing keyword defaults the certificate type to None.
        self.certificate_type = kwargs.get('certificate_type')
class CustomDomainHttpsParameters(Model):
    """Base class for the SSL certificate-source parameters of a custom
    domain.

    Concrete subclasses are selected by the ``certificate_source``
    discriminator: CdnManagedHttpsParameters, UserManagedHttpsParameters.

    :param protocol_type: Required. TLS extension protocol used for secure
     delivery.
    :type protocol_type: str
    :param minimum_tls_version: TLS protocol version used for Https.
    :type minimum_tls_version: str
    :param certificate_source: Required. Constant filled by server.
    :type certificate_source: str
    """

    _validation = {
        'protocol_type': {'required': True},
        'certificate_source': {'required': True},
    }

    _attribute_map = {
        'protocol_type': {'key': 'protocolType', 'type': 'str'},
        'minimum_tls_version': {'key': 'minimumTlsVersion', 'type': 'MinimumTlsVersion'},
        'certificate_source': {'key': 'certificateSource', 'type': 'str'},
    }

    _subtype_map = {
        'certificate_source': {'Cdn': 'CdnManagedHttpsParameters', 'AzureKeyVault': 'UserManagedHttpsParameters'}
    }

    def __init__(self, **kwargs):
        super(CustomDomainHttpsParameters, self).__init__(**kwargs)
        self.protocol_type = kwargs.get('protocol_type')
        self.minimum_tls_version = kwargs.get('minimum_tls_version')
        # Discriminator value; filled in by the concrete subclasses.
        self.certificate_source = None
class CdnManagedHttpsParameters(CustomDomainHttpsParameters):
    """Certificate-source parameters that enable SSL using a CDN-managed
    certificate.

    :param protocol_type: Required. TLS extension protocol used for secure
     delivery.
    :type protocol_type: str
    :param minimum_tls_version: TLS protocol version used for Https.
    :type minimum_tls_version: str
    :param certificate_source: Required. Constant filled by server.
    :type certificate_source: str
    :param certificate_source_parameters: Required. CDN-managed certificate
     details used for enabling SSL.
    :type certificate_source_parameters:
     ~azure.mgmt.cdn.models.CdnCertificateSourceParameters
    """

    _validation = {
        'protocol_type': {'required': True},
        'certificate_source': {'required': True},
        'certificate_source_parameters': {'required': True},
    }

    _attribute_map = {
        'protocol_type': {'key': 'protocolType', 'type': 'str'},
        'minimum_tls_version': {'key': 'minimumTlsVersion', 'type': 'MinimumTlsVersion'},
        'certificate_source': {'key': 'certificateSource', 'type': 'str'},
        'certificate_source_parameters': {'key': 'certificateSourceParameters', 'type': 'CdnCertificateSourceParameters'},
    }

    def __init__(self, **kwargs):
        super(CdnManagedHttpsParameters, self).__init__(**kwargs)
        self.certificate_source_parameters = kwargs.get(
            'certificate_source_parameters')
        # Polymorphic discriminator: this subclass always uses the CDN source.
        self.certificate_source = 'Cdn'
class CheckNameAvailabilityInput(Model):
    """Input of the check-name-availability API.

    :param name: Required. The resource name to validate.
    :type name: str
    :ivar type: Required. Default value: "Microsoft.Cdn/Profiles/Endpoints" .
    :vartype type: str
    """

    _validation = {
        'name': {'required': True},
        'type': {'required': True, 'constant': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    # The only resource type this API checks names for.
    type = "Microsoft.Cdn/Profiles/Endpoints"

    def __init__(self, **kwargs):
        super(CheckNameAvailabilityInput, self).__init__(**kwargs)
        # Missing keyword defaults the name to None.
        self.name = kwargs.get('name')
class CheckNameAvailabilityOutput(Model):
    """Result of the check-name-availability API.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name_available: Whether the name is available.
    :vartype name_available: bool
    :ivar reason: Reason the name is not available.
    :vartype reason: str
    :ivar message: Detailed error message describing why the name is not
     available.
    :vartype message: str
    """

    _validation = {
        'name_available': {'readonly': True},
        'reason': {'readonly': True},
        'message': {'readonly': True},
    }

    _attribute_map = {
        'name_available': {'key': 'nameAvailable', 'type': 'bool'},
        'reason': {'key': 'reason', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(CheckNameAvailabilityOutput, self).__init__(**kwargs)
        # All attributes are read-only and server-populated; they start
        # out as None on the client.
        for _attr in ('name_available', 'reason', 'message'):
            setattr(self, _attr, None)
class CidrIpAddress(Model):
    """An IP address range expressed in CIDR form.

    :param base_ip_address: IP address itself.
    :type base_ip_address: str
    :param prefix_length: Length of the CIDR prefix.
    :type prefix_length: int
    """

    _attribute_map = {
        'base_ip_address': {'key': 'baseIpAddress', 'type': 'str'},
        'prefix_length': {'key': 'prefixLength', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(CidrIpAddress, self).__init__(**kwargs)
        # Copy the optional keyword arguments onto the instance; a missing
        # key simply leaves the attribute as None.
        for _field in ('base_ip_address', 'prefix_length'):
            setattr(self, _field, kwargs.get(_field))
class CloudError(Model):
    """Empty error envelope model; declares no serialized attributes of
    its own.
    """
    _attribute_map = {
    }
class CookiesMatchConditionParameters(Model):
    """Parameter set for a Cookies delivery-rule match condition.

    :param selector: Required. Name of the cookie to match against.
    :type selector: str
    :param operator: Required. Operator applied when matching.
    :type operator: str
    :param negate_condition: Whether the condition result is negated.
    :type negate_condition: bool
    :param match_values: Required. Values the cookie is compared against.
    :type match_values: list[str]
    :param transforms: Transforms applied before the comparison.
    :type transforms: list[str]
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'selector': {'required': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'selector': {'key': 'selector', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleCookiesConditionParameters"

    def __init__(self, **kwargs):
        super(CookiesMatchConditionParameters, self).__init__(**kwargs)
        # Copy the optional keyword arguments onto the instance; a missing
        # key simply leaves the attribute as None.
        for _field in ('selector', 'operator', 'negate_condition',
                       'match_values', 'transforms'):
            setattr(self, _field, kwargs.get(_field))
class Resource(Model):
    """Common fields shared by all tracked Azure resources.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Resource, self).__init__(**kwargs)
        # All attributes are read-only and server-populated; they start
        # out as None on the client.
        for _attr in ('id', 'name', 'type'):
            setattr(self, _attr, None)
class ProxyResource(Resource):
    """ARM proxy resource: same identity fields as :class:`Resource`,
    re-declared here so subclasses serialize them consistently."""

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # No fields of its own; everything is handled by Resource.
        super(ProxyResource, self).__init__(**kwargs)
class CustomDomain(ProxyResource):
    """Custom domain attached to a CDN endpoint.

    :param host_name: Required. Host name of the custom domain.
    :type host_name: str
    :param custom_https_parameters: HTTPS configuration for the domain.
    :param validation_data: Extra validation data (e.g. for some regions).
    :type validation_data: str

    Read-only, server-populated attributes: ``resource_state``,
    ``custom_https_provisioning_state``, ``custom_https_provisioning_substate``,
    ``provisioning_state``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'host_name': {'required': True},
        'resource_state': {'readonly': True},
        'custom_https_provisioning_state': {'readonly': True},
        'custom_https_provisioning_substate': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
        'custom_https_provisioning_state': {'key': 'properties.customHttpsProvisioningState', 'type': 'str'},
        'custom_https_provisioning_substate': {'key': 'properties.customHttpsProvisioningSubstate', 'type': 'str'},
        'custom_https_parameters': {'key': 'properties.customHttpsParameters', 'type': 'CustomDomainHttpsParameters'},
        'validation_data': {'key': 'properties.validationData', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(CustomDomain, self).__init__(**kwargs)
        self.host_name = kwargs.get('host_name')
        # Read-only fields start as None and are filled by the service.
        self.resource_state = None
        self.custom_https_provisioning_state = None
        self.custom_https_provisioning_substate = None
        self.custom_https_parameters = kwargs.get('custom_https_parameters')
        self.validation_data = kwargs.get('validation_data')
        self.provisioning_state = None
class CustomDomainParameters(Model):
    """Request body for creating a custom domain.

    :param host_name: Required. Host name of the custom domain.
    :type host_name: str
    """

    _validation = {
        'host_name': {'required': True},
    }

    _attribute_map = {
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(CustomDomainParameters, self).__init__(**kwargs)
        self.host_name = kwargs.get('host_name')
class DeepCreatedOrigin(Model):
    """Origin definition embedded inline when creating an endpoint.

    :param name: Required. Origin name.
    :type name: str
    :param host_name: Required. Origin host name.
    :type host_name: str
    :param http_port: HTTP port; must be in [1, 65535] when set.
    :type http_port: int
    :param https_port: HTTPS port; must be in [1, 65535] when set.
    :type https_port: int
    """

    _validation = {
        'name': {'required': True},
        'host_name': {'required': True},
        'http_port': {'maximum': 65535, 'minimum': 1},
        'https_port': {'maximum': 65535, 'minimum': 1},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'http_port': {'key': 'properties.httpPort', 'type': 'int'},
        'https_port': {'key': 'properties.httpsPort', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(DeepCreatedOrigin, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.host_name = kwargs.get('host_name')
        self.http_port = kwargs.get('http_port')
        self.https_port = kwargs.get('https_port')
class DeliveryRule(Model):
    """A single rule in an endpoint's delivery policy.

    :param name: Rule name.
    :type name: str
    :param order: Required. Evaluation order of the rule.
    :type order: int
    :param conditions: Conditions that must all match for the rule to apply.
    :type conditions: list[DeliveryRuleCondition]
    :param actions: Required. Actions executed when the rule matches.
    :type actions: list[DeliveryRuleAction]
    """

    _validation = {
        'order': {'required': True},
        'actions': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'order': {'key': 'order', 'type': 'int'},
        'conditions': {'key': 'conditions', 'type': '[DeliveryRuleCondition]'},
        'actions': {'key': 'actions', 'type': '[DeliveryRuleAction]'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRule, self).__init__(**kwargs)
        self.name = kwargs.get('name')
        self.order = kwargs.get('order')
        self.conditions = kwargs.get('conditions')
        self.actions = kwargs.get('actions')
class DeliveryRuleAction(Model):
    """Polymorphic base for delivery-rule actions.

    The ``name`` field is the discriminator; concrete subclasses set it to
    a fixed string and msrest uses ``_subtype_map`` to pick the right class
    during deserialization.
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
    }

    _subtype_map = {
        'name': {'UrlRedirect': 'UrlRedirectAction', 'UrlRewrite': 'UrlRewriteAction', 'ModifyRequestHeader': 'DeliveryRuleRequestHeaderAction', 'ModifyResponseHeader': 'DeliveryRuleResponseHeaderAction', 'CacheExpiration': 'DeliveryRuleCacheExpirationAction', 'CacheKeyQueryString': 'DeliveryRuleCacheKeyQueryStringAction'}
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleAction, self).__init__(**kwargs)
        # Subclasses overwrite this with their discriminator value.
        self.name = None
class DeliveryRuleCacheExpirationAction(DeliveryRuleAction):
    """Action subtype with discriminator ``'CacheExpiration'``.

    :param parameters: Required. CacheExpirationActionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'CacheExpirationActionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleCacheExpirationAction, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'CacheExpiration'
class DeliveryRuleCacheKeyQueryStringAction(DeliveryRuleAction):
    """Action subtype with discriminator ``'CacheKeyQueryString'``.

    :param parameters: Required. CacheKeyQueryStringActionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'CacheKeyQueryStringActionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleCacheKeyQueryStringAction, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'CacheKeyQueryString'
class DeliveryRuleCondition(Model):
    """Polymorphic base for delivery-rule conditions.

    ``name`` is the discriminator; ``_subtype_map`` routes deserialization
    to the concrete condition class.
    """

    _validation = {
        'name': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
    }

    _subtype_map = {
        'name': {'RemoteAddress': 'DeliveryRuleRemoteAddressCondition', 'RequestMethod': 'DeliveryRuleRequestMethodCondition', 'QueryString': 'DeliveryRuleQueryStringCondition', 'PostArgs': 'DeliveryRulePostArgsCondition', 'RequestUri': 'DeliveryRuleRequestUriCondition', 'RequestHeader': 'DeliveryRuleRequestHeaderCondition', 'RequestBody': 'DeliveryRuleRequestBodyCondition', 'RequestScheme': 'DeliveryRuleRequestSchemeCondition', 'UrlPath': 'DeliveryRuleUrlPathCondition', 'UrlFileExtension': 'DeliveryRuleUrlFileExtensionCondition', 'UrlFileName': 'DeliveryRuleUrlFileNameCondition', 'HttpVersion': 'DeliveryRuleHttpVersionCondition', 'Cookies': 'DeliveryRuleCookiesCondition', 'IsDevice': 'DeliveryRuleIsDeviceCondition'}
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleCondition, self).__init__(**kwargs)
        # Subclasses overwrite this with their discriminator value.
        self.name = None
class DeliveryRuleCookiesCondition(DeliveryRuleCondition):
    """Condition subtype with discriminator ``'Cookies'``.

    :param parameters: Required. CookiesMatchConditionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'CookiesMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleCookiesCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'Cookies'
class DeliveryRuleHttpVersionCondition(DeliveryRuleCondition):
    """Condition subtype with discriminator ``'HttpVersion'``.

    :param parameters: Required. HttpVersionMatchConditionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'HttpVersionMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleHttpVersionCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'HttpVersion'
class DeliveryRuleIsDeviceCondition(DeliveryRuleCondition):
    """Condition subtype with discriminator ``'IsDevice'``.

    :param parameters: Required. IsDeviceMatchConditionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'IsDeviceMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleIsDeviceCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'IsDevice'
class DeliveryRulePostArgsCondition(DeliveryRuleCondition):
    """Condition subtype with discriminator ``'PostArgs'``.

    :param parameters: Required. PostArgsMatchConditionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'PostArgsMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRulePostArgsCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'PostArgs'
class DeliveryRuleQueryStringCondition(DeliveryRuleCondition):
    """Condition subtype with discriminator ``'QueryString'``.

    :param parameters: Required. QueryStringMatchConditionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'QueryStringMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleQueryStringCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'QueryString'
class DeliveryRuleRemoteAddressCondition(DeliveryRuleCondition):
    """Condition subtype with discriminator ``'RemoteAddress'``.

    :param parameters: Required. RemoteAddressMatchConditionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'RemoteAddressMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleRemoteAddressCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'RemoteAddress'
class DeliveryRuleRequestBodyCondition(DeliveryRuleCondition):
    """Condition subtype with discriminator ``'RequestBody'``.

    :param parameters: Required. RequestBodyMatchConditionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'RequestBodyMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleRequestBodyCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'RequestBody'
class DeliveryRuleRequestHeaderAction(DeliveryRuleAction):
    """Action subtype with discriminator ``'ModifyRequestHeader'``.

    :param parameters: Required. HeaderActionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'HeaderActionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleRequestHeaderAction, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'ModifyRequestHeader'
class DeliveryRuleRequestHeaderCondition(DeliveryRuleCondition):
    """Condition subtype with discriminator ``'RequestHeader'``.

    :param parameters: Required. RequestHeaderMatchConditionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'RequestHeaderMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleRequestHeaderCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'RequestHeader'
class DeliveryRuleRequestMethodCondition(DeliveryRuleCondition):
    """Condition subtype with discriminator ``'RequestMethod'``.

    :param parameters: Required. RequestMethodMatchConditionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'RequestMethodMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleRequestMethodCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'RequestMethod'
class DeliveryRuleRequestSchemeCondition(DeliveryRuleCondition):
    """Condition subtype with discriminator ``'RequestScheme'``.

    :param parameters: Required. RequestSchemeMatchConditionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'RequestSchemeMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleRequestSchemeCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'RequestScheme'
class DeliveryRuleRequestUriCondition(DeliveryRuleCondition):
    """Condition subtype with discriminator ``'RequestUri'``.

    :param parameters: Required. RequestUriMatchConditionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'RequestUriMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleRequestUriCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'RequestUri'
class DeliveryRuleResponseHeaderAction(DeliveryRuleAction):
    """Action subtype with discriminator ``'ModifyResponseHeader'``.

    :param parameters: Required. HeaderActionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'HeaderActionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleResponseHeaderAction, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'ModifyResponseHeader'
class DeliveryRuleUrlFileExtensionCondition(DeliveryRuleCondition):
    """Condition subtype with discriminator ``'UrlFileExtension'``.

    :param parameters: Required. UrlFileExtensionMatchConditionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'UrlFileExtensionMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleUrlFileExtensionCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'UrlFileExtension'
class DeliveryRuleUrlFileNameCondition(DeliveryRuleCondition):
    """Condition subtype with discriminator ``'UrlFileName'``.

    :param parameters: Required. UrlFileNameMatchConditionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'UrlFileNameMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleUrlFileNameCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'UrlFileName'
class DeliveryRuleUrlPathCondition(DeliveryRuleCondition):
    """Condition subtype with discriminator ``'UrlPath'``.

    :param parameters: Required. UrlPathMatchConditionParameters payload.
    """

    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'UrlPathMatchConditionParameters'},
    }

    def __init__(self, **kwargs):
        super(DeliveryRuleUrlPathCondition, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters')
        self.name = 'UrlPath'
class EdgeNode(ProxyResource):
    """CDN edge node resource.

    :param ip_address_groups: Required. IP address groups served by the node.
    :type ip_address_groups: list[IpAddressGroup]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'ip_address_groups': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'ip_address_groups': {'key': 'properties.ipAddressGroups', 'type': '[IpAddressGroup]'},
    }

    def __init__(self, **kwargs):
        super(EdgeNode, self).__init__(**kwargs)
        self.ip_address_groups = kwargs.get('ip_address_groups')
class TrackedResource(Resource):
    """ARM tracked resource: adds a required location and optional tags.

    :param location: Required. Azure region of the resource.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super(TrackedResource, self).__init__(**kwargs)
        self.location = kwargs.get('location')
        self.tags = kwargs.get('tags')
class Endpoint(TrackedResource):
    """CDN endpoint resource.

    Caller-settable parameters (all optional unless noted): origin host
    header/path, compression settings, protocol toggles, query-string
    caching behavior, optimization type, probe path, geo filters, delivery
    policy, and the required inline ``origins`` list.

    Read-only, server-populated attributes: ``host_name``,
    ``resource_state``, ``provisioning_state``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'host_name': {'readonly': True},
        'origins': {'required': True},
        'resource_state': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'origin_host_header': {'key': 'properties.originHostHeader', 'type': 'str'},
        'origin_path': {'key': 'properties.originPath', 'type': 'str'},
        'content_types_to_compress': {'key': 'properties.contentTypesToCompress', 'type': '[str]'},
        'is_compression_enabled': {'key': 'properties.isCompressionEnabled', 'type': 'bool'},
        'is_http_allowed': {'key': 'properties.isHttpAllowed', 'type': 'bool'},
        'is_https_allowed': {'key': 'properties.isHttpsAllowed', 'type': 'bool'},
        'query_string_caching_behavior': {'key': 'properties.queryStringCachingBehavior', 'type': 'QueryStringCachingBehavior'},
        'optimization_type': {'key': 'properties.optimizationType', 'type': 'str'},
        'probe_path': {'key': 'properties.probePath', 'type': 'str'},
        'geo_filters': {'key': 'properties.geoFilters', 'type': '[GeoFilter]'},
        'delivery_policy': {'key': 'properties.deliveryPolicy', 'type': 'EndpointPropertiesUpdateParametersDeliveryPolicy'},
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'origins': {'key': 'properties.origins', 'type': '[DeepCreatedOrigin]'},
        'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Endpoint, self).__init__(**kwargs)
        self.origin_host_header = kwargs.get('origin_host_header')
        self.origin_path = kwargs.get('origin_path')
        self.content_types_to_compress = kwargs.get('content_types_to_compress')
        self.is_compression_enabled = kwargs.get('is_compression_enabled')
        self.is_http_allowed = kwargs.get('is_http_allowed')
        self.is_https_allowed = kwargs.get('is_https_allowed')
        self.query_string_caching_behavior = kwargs.get('query_string_caching_behavior')
        self.optimization_type = kwargs.get('optimization_type')
        self.probe_path = kwargs.get('probe_path')
        self.geo_filters = kwargs.get('geo_filters')
        self.delivery_policy = kwargs.get('delivery_policy')
        # Read-only fields start as None and are filled by the service.
        self.host_name = None
        self.origins = kwargs.get('origins')
        self.resource_state = None
        self.provisioning_state = None
class EndpointPropertiesUpdateParametersDeliveryPolicy(Model):
    """Delivery policy attached to endpoint properties.

    :param description: User-friendly policy description.
    :type description: str
    :param rules: Required. List of delivery rules.
    :type rules: list[DeliveryRule]
    """

    _validation = {
        'rules': {'required': True},
    }

    _attribute_map = {
        'description': {'key': 'description', 'type': 'str'},
        'rules': {'key': 'rules', 'type': '[DeliveryRule]'},
    }

    def __init__(self, **kwargs):
        super(EndpointPropertiesUpdateParametersDeliveryPolicy, self).__init__(**kwargs)
        self.description = kwargs.get('description')
        self.rules = kwargs.get('rules')
class EndpointUpdateParameters(Model):
    """Mutable endpoint properties sent in an update (PATCH-style) request.

    All fields are optional; only the caller-supplied ones are serialized.
    Mirrors the writable subset of :class:`Endpoint`.
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'origin_host_header': {'key': 'properties.originHostHeader', 'type': 'str'},
        'origin_path': {'key': 'properties.originPath', 'type': 'str'},
        'content_types_to_compress': {'key': 'properties.contentTypesToCompress', 'type': '[str]'},
        'is_compression_enabled': {'key': 'properties.isCompressionEnabled', 'type': 'bool'},
        'is_http_allowed': {'key': 'properties.isHttpAllowed', 'type': 'bool'},
        'is_https_allowed': {'key': 'properties.isHttpsAllowed', 'type': 'bool'},
        'query_string_caching_behavior': {'key': 'properties.queryStringCachingBehavior', 'type': 'QueryStringCachingBehavior'},
        'optimization_type': {'key': 'properties.optimizationType', 'type': 'str'},
        'probe_path': {'key': 'properties.probePath', 'type': 'str'},
        'geo_filters': {'key': 'properties.geoFilters', 'type': '[GeoFilter]'},
        'delivery_policy': {'key': 'properties.deliveryPolicy', 'type': 'EndpointPropertiesUpdateParametersDeliveryPolicy'},
    }

    def __init__(self, **kwargs):
        super(EndpointUpdateParameters, self).__init__(**kwargs)
        self.tags = kwargs.get('tags')
        self.origin_host_header = kwargs.get('origin_host_header')
        self.origin_path = kwargs.get('origin_path')
        self.content_types_to_compress = kwargs.get('content_types_to_compress')
        self.is_compression_enabled = kwargs.get('is_compression_enabled')
        self.is_http_allowed = kwargs.get('is_http_allowed')
        self.is_https_allowed = kwargs.get('is_https_allowed')
        self.query_string_caching_behavior = kwargs.get('query_string_caching_behavior')
        self.optimization_type = kwargs.get('optimization_type')
        self.probe_path = kwargs.get('probe_path')
        self.geo_filters = kwargs.get('geo_filters')
        self.delivery_policy = kwargs.get('delivery_policy')
class ErrorResponse(Model):
    """Service error payload with read-only code and message fields."""

    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
    }

    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ErrorResponse, self).__init__(**kwargs)
        # Server-populated; never taken from kwargs.
        self.code = None
        self.message = None
class ErrorResponseException(HttpOperationError):
    """HTTP operation error whose body deserializes as :class:`ErrorResponse`."""

    def __init__(self, deserialize, response, *args):
        # Bind the 'ErrorResponse' model name so the base class can parse
        # the error body.
        super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
class GeoFilter(Model):
    """Geo-filtering rule for an endpoint.

    :param relative_path: Required. Path the filter applies to.
    :type relative_path: str
    :param action: Required. GeoFilterActions value (allow/block semantics
        defined by the service enum).
    :param country_codes: Required. Country codes the action applies to.
    :type country_codes: list[str]
    """

    _validation = {
        'relative_path': {'required': True},
        'action': {'required': True},
        'country_codes': {'required': True},
    }

    _attribute_map = {
        'relative_path': {'key': 'relativePath', 'type': 'str'},
        'action': {'key': 'action', 'type': 'GeoFilterActions'},
        'country_codes': {'key': 'countryCodes', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(GeoFilter, self).__init__(**kwargs)
        self.relative_path = kwargs.get('relative_path')
        self.action = kwargs.get('action')
        self.country_codes = kwargs.get('country_codes')
class HeaderActionParameters(Model):
    """Parameters for a header-modification action.

    :param header_action: Required. Action to take on the header.
    :type header_action: str
    :param header_name: Required. Name of the header to modify.
    :type header_name: str
    :param value: Value to set (meaning depends on ``header_action``).
    :type value: str

    :ivar odatatype: Required, constant discriminator for the wire format.
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'header_action': {'required': True},
        'header_name': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'header_action': {'key': 'headerAction', 'type': 'str'},
        'header_name': {'key': 'headerName', 'type': 'str'},
        'value': {'key': 'value', 'type': 'str'},
    }

    # Fixed OData type identifier; never supplied by callers.
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleHeaderActionParameters"

    def __init__(self, **kwargs):
        super(HeaderActionParameters, self).__init__(**kwargs)
        self.header_action = kwargs.get('header_action')
        self.header_name = kwargs.get('header_name')
        self.value = kwargs.get('value')
class HttpVersionMatchConditionParameters(Model):
    """Parameters for a condition matching the HTTP version.

    :param negate_condition: If True, inverts the result of the match.
    :type negate_condition: bool
    :param match_values: Required. Values compared against the HTTP version.
    :type match_values: list[str]

    :ivar odatatype: Required, constant discriminator for the wire format.
    :ivar operator: Required, constant — this condition only supports "Equal".
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True, 'constant': True},
        'match_values': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
    }

    # Constants fixed by the service contract; never supplied by callers.
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleHttpVersionConditionParameters"
    operator = "Equal"

    def __init__(self, **kwargs):
        super(HttpVersionMatchConditionParameters, self).__init__(**kwargs)
        self.negate_condition = kwargs.get('negate_condition')
        self.match_values = kwargs.get('match_values')
class IpAddressGroup(Model):
    """Group of IPv4/IPv6 CIDR blocks for a delivery region.

    :param delivery_region: Delivery region name.
    :type delivery_region: str
    :param ipv4_addresses: IPv4 CIDR blocks.
    :type ipv4_addresses: list[CidrIpAddress]
    :param ipv6_addresses: IPv6 CIDR blocks.
    :type ipv6_addresses: list[CidrIpAddress]
    """

    _attribute_map = {
        'delivery_region': {'key': 'deliveryRegion', 'type': 'str'},
        'ipv4_addresses': {'key': 'ipv4Addresses', 'type': '[CidrIpAddress]'},
        'ipv6_addresses': {'key': 'ipv6Addresses', 'type': '[CidrIpAddress]'},
    }

    def __init__(self, **kwargs):
        super(IpAddressGroup, self).__init__(**kwargs)
        self.delivery_region = kwargs.get('delivery_region')
        self.ipv4_addresses = kwargs.get('ipv4_addresses')
        self.ipv6_addresses = kwargs.get('ipv6_addresses')
class IsDeviceMatchConditionParameters(Model):
    """Parameters for a device-type matching condition.

    :param negate_condition: If True, inverts the result of the match.
    :type negate_condition: bool
    :param match_values: Required. Values compared against the device type.
    :type match_values: list[str]
    :param transforms: Transforms applied before matching.
    :type transforms: list[str]

    :ivar odatatype: Required, constant discriminator for the wire format.
    :ivar operator: Required, constant — this condition only supports "Equal".
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True, 'constant': True},
        'match_values': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    # Constants fixed by the service contract; never supplied by callers.
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleIsDeviceConditionParameters"
    operator = "Equal"

    def __init__(self, **kwargs):
        super(IsDeviceMatchConditionParameters, self).__init__(**kwargs)
        self.negate_condition = kwargs.get('negate_condition')
        self.match_values = kwargs.get('match_values')
        self.transforms = kwargs.get('transforms')
class KeyVaultCertificateSourceParameters(Model):
    """Key Vault certificate reference used as an HTTPS certificate source.

    :param subscription_id: Required. Subscription containing the vault.
    :type subscription_id: str
    :param resource_group_name: Required. Resource group of the vault.
    :type resource_group_name: str
    :param vault_name: Required. Key Vault name.
    :type vault_name: str
    :param secret_name: Required. Secret holding the certificate.
    :type secret_name: str
    :param secret_version: Required. Version of the secret.
    :type secret_version: str

    :ivar odatatype: Required, constant wire-format discriminator.
    :ivar update_rule: Required, constant — always "NoAction".
    :ivar delete_rule: Required, constant — always "NoAction".
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'subscription_id': {'required': True},
        'resource_group_name': {'required': True},
        'vault_name': {'required': True},
        'secret_name': {'required': True},
        'secret_version': {'required': True},
        'update_rule': {'required': True, 'constant': True},
        'delete_rule': {'required': True, 'constant': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
        'resource_group_name': {'key': 'resourceGroupName', 'type': 'str'},
        'vault_name': {'key': 'vaultName', 'type': 'str'},
        'secret_name': {'key': 'secretName', 'type': 'str'},
        'secret_version': {'key': 'secretVersion', 'type': 'str'},
        'update_rule': {'key': 'updateRule', 'type': 'str'},
        'delete_rule': {'key': 'deleteRule', 'type': 'str'},
    }

    # Constants fixed by the service contract; never supplied by callers.
    odatatype = "#Microsoft.Azure.Cdn.Models.KeyVaultCertificateSourceParameters"
    update_rule = "NoAction"
    delete_rule = "NoAction"

    def __init__(self, **kwargs):
        super(KeyVaultCertificateSourceParameters, self).__init__(**kwargs)
        self.subscription_id = kwargs.get('subscription_id')
        self.resource_group_name = kwargs.get('resource_group_name')
        self.vault_name = kwargs.get('vault_name')
        self.secret_name = kwargs.get('secret_name')
        self.secret_version = kwargs.get('secret_version')
class LoadParameters(Model):
    """Request body for pre-loading content paths onto the CDN.

    :param content_paths: Required. Paths to load.
    :type content_paths: list[str]
    """

    _validation = {
        'content_paths': {'required': True},
    }

    _attribute_map = {
        'content_paths': {'key': 'contentPaths', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(LoadParameters, self).__init__(**kwargs)
        self.content_paths = kwargs.get('content_paths')
class Operation(Model):
    """REST API operation descriptor.

    :ivar name: Read-only operation name (server-populated).
    :param display: Display metadata for the operation.
    :type display: OperationDisplay
    """

    _validation = {
        'name': {'readonly': True},
    }

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
    }

    def __init__(self, **kwargs):
        super(Operation, self).__init__(**kwargs)
        self.name = None  # server-populated
        self.display = kwargs.get('display')
class OperationDisplay(Model):
    """Display metadata for an operation; all fields are read-only."""

    _validation = {
        'provider': {'readonly': True},
        'resource': {'readonly': True},
        'operation': {'readonly': True},
    }

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(OperationDisplay, self).__init__(**kwargs)
        # Server-populated; never taken from kwargs.
        self.provider = None
        self.resource = None
        self.operation = None
class Origin(TrackedResource):
    """CDN origin resource.

    :param host_name: Required. Origin host name.
    :type host_name: str
    :param http_port: HTTP port; must be in [1, 65535] when set.
    :type http_port: int
    :param https_port: HTTPS port; must be in [1, 65535] when set.
    :type https_port: int

    Read-only attributes: ``resource_state``, ``provisioning_state``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'host_name': {'required': True},
        'http_port': {'maximum': 65535, 'minimum': 1},
        'https_port': {'maximum': 65535, 'minimum': 1},
        'resource_state': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'http_port': {'key': 'properties.httpPort', 'type': 'int'},
        'https_port': {'key': 'properties.httpsPort', 'type': 'int'},
        'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Origin, self).__init__(**kwargs)
        self.host_name = kwargs.get('host_name')
        self.http_port = kwargs.get('http_port')
        self.https_port = kwargs.get('https_port')
        # Read-only fields start as None and are filled by the service.
        self.resource_state = None
        self.provisioning_state = None
class OriginUpdateParameters(Model):
    """Mutable origin properties sent in an update request.

    :param host_name: New origin host name.
    :type host_name: str
    :param http_port: HTTP port; must be in [1, 65535] when set.
    :type http_port: int
    :param https_port: HTTPS port; must be in [1, 65535] when set.
    :type https_port: int
    """

    _validation = {
        'http_port': {'maximum': 65535, 'minimum': 1},
        'https_port': {'maximum': 65535, 'minimum': 1},
    }

    _attribute_map = {
        'host_name': {'key': 'properties.hostName', 'type': 'str'},
        'http_port': {'key': 'properties.httpPort', 'type': 'int'},
        'https_port': {'key': 'properties.httpsPort', 'type': 'int'},
    }

    def __init__(self, **kwargs):
        super(OriginUpdateParameters, self).__init__(**kwargs)
        self.host_name = kwargs.get('host_name')
        self.http_port = kwargs.get('http_port')
        self.https_port = kwargs.get('https_port')
class PostArgsMatchConditionParameters(Model):
    """Parameters for a condition matching POST arguments.

    :param selector: Required. Name of the POST argument to match on.
    :type selector: str
    :param operator: Required. Comparison operator to apply.
    :type operator: str
    :param negate_condition: If True, inverts the result of the match.
    :type negate_condition: bool
    :param match_values: Required. Values compared against the argument.
    :type match_values: list[str]
    :param transforms: Transforms applied before matching.
    :type transforms: list[str]

    :ivar odatatype: Required, constant discriminator for the wire format.
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'selector': {'required': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'selector': {'key': 'selector', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    # Fixed OData type identifier; never supplied by callers.
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRulePostArgsConditionParameters"

    def __init__(self, **kwargs):
        super(PostArgsMatchConditionParameters, self).__init__(**kwargs)
        self.selector = kwargs.get('selector')
        self.operator = kwargs.get('operator')
        self.negate_condition = kwargs.get('negate_condition')
        self.match_values = kwargs.get('match_values')
        self.transforms = kwargs.get('transforms')
class Profile(TrackedResource):
    """CDN profile resource.

    :param sku: Required. Pricing tier (Sku) of the profile.

    Read-only attributes: ``resource_state``, ``provisioning_state``.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'sku': {'required': True},
        'resource_state': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'resource_state': {'key': 'properties.resourceState', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(Profile, self).__init__(**kwargs)
        self.sku = kwargs.get('sku')
        # Read-only fields start as None and are filled by the service.
        self.resource_state = None
        self.provisioning_state = None
class ProfileUpdateParameters(Model):
    """Update request for a profile; only tags are mutable.

    :param tags: Profile tags.
    :type tags: dict[str, str]
    """

    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
    }

    def __init__(self, **kwargs):
        super(ProfileUpdateParameters, self).__init__(**kwargs)
        self.tags = kwargs.get('tags')
class PurgeParameters(Model):
    """Request body for purging cached content paths.

    :param content_paths: Required. Paths to purge.
    :type content_paths: list[str]
    """

    _validation = {
        'content_paths': {'required': True},
    }

    _attribute_map = {
        'content_paths': {'key': 'contentPaths', 'type': '[str]'},
    }

    def __init__(self, **kwargs):
        super(PurgeParameters, self).__init__(**kwargs)
        self.content_paths = kwargs.get('content_paths')
class QueryStringMatchConditionParameters(Model):
    """Parameters for a condition matching the request query string.

    :param operator: Required. Comparison operator to apply.
    :type operator: str
    :param negate_condition: If True, inverts the result of the match.
    :type negate_condition: bool
    :param match_values: Required. Values compared against the query string.
    :type match_values: list[str]
    :param transforms: Transforms applied before matching.
    :type transforms: list[str]

    :ivar odatatype: Required, constant discriminator for the wire format.
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    # Fixed OData type identifier; never supplied by callers.
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleQueryStringConditionParameters"

    def __init__(self, **kwargs):
        super(QueryStringMatchConditionParameters, self).__init__(**kwargs)
        self.operator = kwargs.get('operator')
        self.negate_condition = kwargs.get('negate_condition')
        self.match_values = kwargs.get('match_values')
        self.transforms = kwargs.get('transforms')
class RemoteAddressMatchConditionParameters(Model):
    """Parameters for a condition matching the client's remote address.

    :param operator: Required. Comparison operator to apply.
    :type operator: str
    :param negate_condition: If True, inverts the result of the match.
    :type negate_condition: bool
    :param match_values: Required. Values compared against the remote address.
    :type match_values: list[str]
    :param transforms: Transforms applied before matching.
    :type transforms: list[str]

    :ivar odatatype: Required, constant discriminator for the wire format.
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    # Fixed OData type identifier; never supplied by callers.
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleRemoteAddressConditionParameters"

    def __init__(self, **kwargs):
        super(RemoteAddressMatchConditionParameters, self).__init__(**kwargs)
        self.operator = kwargs.get('operator')
        self.negate_condition = kwargs.get('negate_condition')
        self.match_values = kwargs.get('match_values')
        self.transforms = kwargs.get('transforms')
class RequestBodyMatchConditionParameters(Model):
    """Parameters for a condition matching the request body.

    :param operator: Required. Comparison operator to apply.
    :type operator: str
    :param negate_condition: If True, inverts the result of the match.
    :type negate_condition: bool
    :param match_values: Required. Values compared against the body.
    :type match_values: list[str]
    :param transforms: Transforms applied before matching.
    :type transforms: list[str]

    :ivar odatatype: Required, constant discriminator for the wire format.
    """

    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }

    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }

    # Fixed OData type identifier; never supplied by callers.
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleRequestBodyConditionParameters"

    def __init__(self, **kwargs):
        super(RequestBodyMatchConditionParameters, self).__init__(**kwargs)
        self.operator = kwargs.get('operator')
        self.negate_condition = kwargs.get('negate_condition')
        self.match_values = kwargs.get('match_values')
        self.transforms = kwargs.get('transforms')
class RequestHeaderMatchConditionParameters(Model):
    """Parameters for a delivery-rule RequestHeader match condition.

    ``selector`` names the header to inspect and is required in addition to
    ``operator`` and ``match_values``.
    """
    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'selector': {'required': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'selector': {'key': 'selector', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleRequestHeaderConditionParameters"
    def __init__(self, **kwargs):
        super(RequestHeaderMatchConditionParameters, self).__init__(**kwargs)
        self.selector = kwargs.get('selector', None)
        self.operator = kwargs.get('operator', None)
        self.negate_condition = kwargs.get('negate_condition', None)
        self.match_values = kwargs.get('match_values', None)
        self.transforms = kwargs.get('transforms', None)
class RequestMethodMatchConditionParameters(Model):
    """Parameters for a delivery-rule RequestMethod match condition.

    ``operator`` is constant ("Equal"): methods can only be matched by equality.
    """
    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True, 'constant': True},
        'match_values': {'required': True},
    }
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
    }
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleRequestMethodConditionParameters"
    operator = "Equal"
    def __init__(self, **kwargs):
        super(RequestMethodMatchConditionParameters, self).__init__(**kwargs)
        self.negate_condition = kwargs.get('negate_condition', None)
        self.match_values = kwargs.get('match_values', None)
class RequestSchemeMatchConditionParameters(Model):
    """Parameters for a delivery-rule RequestScheme match condition.

    ``operator`` is constant ("Equal"): schemes can only be matched by equality.
    """
    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True, 'constant': True},
        'match_values': {'required': True},
    }
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
    }
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleRequestSchemeConditionParameters"
    operator = "Equal"
    def __init__(self, **kwargs):
        super(RequestSchemeMatchConditionParameters, self).__init__(**kwargs)
        self.negate_condition = kwargs.get('negate_condition', None)
        self.match_values = kwargs.get('match_values', None)
class RequestUriMatchConditionParameters(Model):
    """Parameters for a delivery-rule RequestUri match condition."""
    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleRequestUriConditionParameters"
    def __init__(self, **kwargs):
        super(RequestUriMatchConditionParameters, self).__init__(**kwargs)
        self.operator = kwargs.get('operator', None)
        self.negate_condition = kwargs.get('negate_condition', None)
        self.match_values = kwargs.get('match_values', None)
        self.transforms = kwargs.get('transforms', None)
class ResourceUsage(Model):
    """Output of a resource-usage query.

    All attributes are read-only (server-populated), hence initialized to None.
    """
    _validation = {
        'resource_type': {'readonly': True},
        'unit': {'readonly': True},
        'current_value': {'readonly': True},
        'limit': {'readonly': True},
    }
    _attribute_map = {
        'resource_type': {'key': 'resourceType', 'type': 'str'},
        'unit': {'key': 'unit', 'type': 'str'},
        'current_value': {'key': 'currentValue', 'type': 'int'},
        'limit': {'key': 'limit', 'type': 'int'},
    }
    def __init__(self, **kwargs):
        super(ResourceUsage, self).__init__(**kwargs)
        self.resource_type = None
        self.unit = None
        self.current_value = None
        self.limit = None
class Sku(Model):
    """Pricing tier of the resource, identified by ``name``."""
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(Sku, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
class SsoUri(Model):
    """Single-sign-on URI result; ``sso_uri_value`` is read-only (server-populated)."""
    _validation = {
        'sso_uri_value': {'readonly': True},
    }
    _attribute_map = {
        'sso_uri_value': {'key': 'ssoUriValue', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(SsoUri, self).__init__(**kwargs)
        self.sso_uri_value = None
class SupportedOptimizationTypesListResult(Model):
    """Read-only list of optimization types supported by an endpoint."""
    _validation = {
        'supported_optimization_types': {'readonly': True},
    }
    _attribute_map = {
        'supported_optimization_types': {'key': 'supportedOptimizationTypes', 'type': '[str]'},
    }
    def __init__(self, **kwargs):
        super(SupportedOptimizationTypesListResult, self).__init__(**kwargs)
        self.supported_optimization_types = None
class UrlFileExtensionMatchConditionParameters(Model):
    """Parameters for a delivery-rule UrlFileExtension match condition."""
    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlFileExtensionMatchConditionParameters"
    def __init__(self, **kwargs):
        super(UrlFileExtensionMatchConditionParameters, self).__init__(**kwargs)
        self.operator = kwargs.get('operator', None)
        self.negate_condition = kwargs.get('negate_condition', None)
        self.match_values = kwargs.get('match_values', None)
        self.transforms = kwargs.get('transforms', None)
class UrlFileNameMatchConditionParameters(Model):
    """Parameters for a delivery-rule UrlFileName match condition."""
    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlFilenameConditionParameters"
    def __init__(self, **kwargs):
        super(UrlFileNameMatchConditionParameters, self).__init__(**kwargs)
        self.operator = kwargs.get('operator', None)
        self.negate_condition = kwargs.get('negate_condition', None)
        self.match_values = kwargs.get('match_values', None)
        self.transforms = kwargs.get('transforms', None)
class UrlPathMatchConditionParameters(Model):
    """Parameters for a delivery-rule UrlPath match condition."""
    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'operator': {'required': True},
        'match_values': {'required': True},
    }
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'operator': {'key': 'operator', 'type': 'str'},
        'negate_condition': {'key': 'negateCondition', 'type': 'bool'},
        'match_values': {'key': 'matchValues', 'type': '[str]'},
        'transforms': {'key': 'transforms', 'type': '[str]'},
    }
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlPathMatchConditionParameters"
    def __init__(self, **kwargs):
        super(UrlPathMatchConditionParameters, self).__init__(**kwargs)
        self.operator = kwargs.get('operator', None)
        self.negate_condition = kwargs.get('negate_condition', None)
        self.match_values = kwargs.get('match_values', None)
        self.transforms = kwargs.get('transforms', None)
class UrlRedirectAction(DeliveryRuleAction):
    """Delivery-rule action that redirects the request; ``name`` is fixed to 'UrlRedirect'."""
    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'UrlRedirectActionParameters'},
    }
    def __init__(self, **kwargs):
        super(UrlRedirectAction, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters', None)
        self.name = 'UrlRedirect'
class UrlRedirectActionParameters(Model):
    """Parameters for a URL-redirect action; only ``redirect_type`` is required."""
    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'redirect_type': {'required': True},
    }
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'redirect_type': {'key': 'redirectType', 'type': 'str'},
        'destination_protocol': {'key': 'destinationProtocol', 'type': 'str'},
        'custom_path': {'key': 'customPath', 'type': 'str'},
        'custom_hostname': {'key': 'customHostname', 'type': 'str'},
        'custom_query_string': {'key': 'customQueryString', 'type': 'str'},
        'custom_fragment': {'key': 'customFragment', 'type': 'str'},
    }
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlRedirectActionParameters"
    def __init__(self, **kwargs):
        super(UrlRedirectActionParameters, self).__init__(**kwargs)
        self.redirect_type = kwargs.get('redirect_type', None)
        self.destination_protocol = kwargs.get('destination_protocol', None)
        self.custom_path = kwargs.get('custom_path', None)
        self.custom_hostname = kwargs.get('custom_hostname', None)
        self.custom_query_string = kwargs.get('custom_query_string', None)
        self.custom_fragment = kwargs.get('custom_fragment', None)
class UrlRewriteAction(DeliveryRuleAction):
    """Delivery-rule action that rewrites the request URL; ``name`` is fixed to 'UrlRewrite'."""
    _validation = {
        'name': {'required': True},
        'parameters': {'required': True},
    }
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'parameters': {'key': 'parameters', 'type': 'UrlRewriteActionParameters'},
    }
    def __init__(self, **kwargs):
        super(UrlRewriteAction, self).__init__(**kwargs)
        self.parameters = kwargs.get('parameters', None)
        self.name = 'UrlRewrite'
class UrlRewriteActionParameters(Model):
    """Parameters for a URL-rewrite action (source pattern -> destination)."""
    _validation = {
        'odatatype': {'required': True, 'constant': True},
        'source_pattern': {'required': True},
        'destination': {'required': True},
    }
    _attribute_map = {
        'odatatype': {'key': '@odata\\.type', 'type': 'str'},
        'source_pattern': {'key': 'sourcePattern', 'type': 'str'},
        'destination': {'key': 'destination', 'type': 'str'},
        'preserve_unmatched_path': {'key': 'preserveUnmatchedPath', 'type': 'bool'},
    }
    odatatype = "#Microsoft.Azure.Cdn.Models.DeliveryRuleUrlRewriteActionParameters"
    def __init__(self, **kwargs):
        super(UrlRewriteActionParameters, self).__init__(**kwargs)
        self.source_pattern = kwargs.get('source_pattern', None)
        self.destination = kwargs.get('destination', None)
        self.preserve_unmatched_path = kwargs.get('preserve_unmatched_path', None)
class UserManagedHttpsParameters(CustomDomainHttpsParameters):
    """Custom-domain HTTPS settings backed by a user-managed Key Vault certificate.

    ``certificate_source`` is fixed to 'AzureKeyVault' (polymorphic discriminator).
    """
    _validation = {
        'protocol_type': {'required': True},
        'certificate_source': {'required': True},
        'certificate_source_parameters': {'required': True},
    }
    _attribute_map = {
        'protocol_type': {'key': 'protocolType', 'type': 'str'},
        'minimum_tls_version': {'key': 'minimumTlsVersion', 'type': 'MinimumTlsVersion'},
        'certificate_source': {'key': 'certificateSource', 'type': 'str'},
        'certificate_source_parameters': {'key': 'certificateSourceParameters', 'type': 'KeyVaultCertificateSourceParameters'},
    }
    def __init__(self, **kwargs):
        super(UserManagedHttpsParameters, self).__init__(**kwargs)
        self.certificate_source_parameters = kwargs.get('certificate_source_parameters', None)
        self.certificate_source = 'AzureKeyVault'
class ValidateCustomDomainInput(Model):
    """Input of the custom-domain validation API; ``host_name`` is required."""
    _validation = {
        'host_name': {'required': True},
    }
    _attribute_map = {
        'host_name': {'key': 'hostName', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(ValidateCustomDomainInput, self).__init__(**kwargs)
        self.host_name = kwargs.get('host_name', None)
class ValidateCustomDomainOutput(Model):
    """Output of the custom-domain validation API; all attributes are read-only."""
    _validation = {
        'custom_domain_validated': {'readonly': True},
        'reason': {'readonly': True},
        'message': {'readonly': True},
    }
    _attribute_map = {
        'custom_domain_validated': {'key': 'customDomainValidated', 'type': 'bool'},
        'reason': {'key': 'reason', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(ValidateCustomDomainOutput, self).__init__(**kwargs)
        self.custom_domain_validated = None
        self.reason = None
        self.message = None
class ValidateProbeInput(Model):
    """Input of the probe-URL validation API; ``probe_url`` is required."""
    _validation = {
        'probe_url': {'required': True},
    }
    _attribute_map = {
        'probe_url': {'key': 'probeURL', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(ValidateProbeInput, self).__init__(**kwargs)
        self.probe_url = kwargs.get('probe_url', None)
class ValidateProbeOutput(Model):
    """Output of the probe-URL validation API; all attributes are read-only."""
    _validation = {
        'is_valid': {'readonly': True},
        'error_code': {'readonly': True},
        'message': {'readonly': True},
    }
    _attribute_map = {
        'is_valid': {'key': 'isValid', 'type': 'bool'},
        'error_code': {'key': 'errorCode', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }
    def __init__(self, **kwargs):
        super(ValidateProbeOutput, self).__init__(**kwargs)
        self.is_valid = None
        self.error_code = None
        self.message = None
| true | true |
1c3ce311e8dfd3dccb6639851fcb620f53402a91 | 2,364 | py | Python | MshNeo4j/__init__.py | Karanshade/moonshade | 4e119af40cd694396afd2d6a5bffdcc65b8bff09 | [
"Apache-2.0"
] | null | null | null | MshNeo4j/__init__.py | Karanshade/moonshade | 4e119af40cd694396afd2d6a5bffdcc65b8bff09 | [
"Apache-2.0"
] | null | null | null | MshNeo4j/__init__.py | Karanshade/moonshade | 4e119af40cd694396afd2d6a5bffdcc65b8bff09 | [
"Apache-2.0"
] | null | null | null | import os
from subprocess import check_output
import pandas as pd
from py2neo import Graph, Node, Relationship
import karmahutils as kut
version_info = "v0.1"
version_type = 'moonshade library'
authors = ['Yann Girard']
contact = 'yann.girard@gmail.com'
lib_name = 'MshNeo4j'
purpose = """QoL tools for interacting and maintaining neo4j db."""
def get_graph(key, ip, user, database="validalabdev"):
    """Create a py2neo Graph connected to ``database`` on the given host.

    Parameters
    ----------
    key : str
        Password for ``user``.
    ip : str
        Host (and optional port) of the neo4j server, without a scheme.
    user : str
        Neo4j user name.
    database : str
        Name of the database to connect to.

    Returns
    -------
    py2neo.Graph or None
        The connected graph, or None on failure (the error is reported,
        not raised, to give callers room to handle connection problems).
    """
    try:
        # Bug fix: the database name was hard-coded to "validalabdev",
        # silently ignoring the ``database`` argument.
        return Graph('bolt://' + ip, auth=(user, key), name=database)
    except Exception as e:
        kut.display_message('can not connect to', database, 'with user', user, 'on ip', ip)
        print(e)
def cypher_command(cypher_string, user, key, in_db=None):
    """Build the ``cypher-shell`` invocation for a cypher statement.

    The statement is wrapped in double quotes; a ``-d`` flag is added only
    when ``in_db`` is given.
    """
    parts = ['cypher-shell']
    if in_db is not None:
        parts.append(f'-d {in_db}')
    parts.append(f'-u {user} -p {key} "{cypher_string}"')
    return ' '.join(parts)
def execute_cypher(cypher_string, user, key, silent_mode=True, in_db=None):
    """Run a cypher statement through ``cypher-shell`` and return its raw bytes output.

    NOTE(review): the statement and credentials are interpolated into a
    ``shell=True`` command line — intended for trusted internal input only.
    """
    shell_cmd = cypher_command(cypher_string=cypher_string, user=user, key=key, in_db=in_db)
    if not silent_mode:
        print(shell_cmd)
    return check_output(shell_cmd, shell=True)
def show_databases(user=None, key=None):
    """Print and return the server's database list as a pandas DataFrame.

    Bug fix: ``execute_cypher`` requires ``user`` and ``key`` but neither was
    passed, so every call raised TypeError.  They are now accepted here
    (keyword, default None for backward compatibility) and forwarded.
    """
    show_database = execute_cypher("show databases;", user=user, key=key)
    # cypher-shell returns bytes; decode, then split into rows and columns.
    show_array = [X.split(',') for X in show_database.decode("unicode_escape").split('\n')]
    # First row holds the column headers.
    db_printing = pd.DataFrame(data=show_array[1:], columns=show_array[0])
    print(db_printing)
    return db_printing
def backup_database(database, backup_dir="/data/backup-data/", user=None, key=None):
    """Restore ``database`` from the most recent dump in ``backup_dir``.

    Despite its name this function *loads* the latest dump (stop database,
    ``neo4j-admin load --force``, start database); it does not create one.

    Bug fix: the two ``execute_cypher`` calls omitted the required ``user``
    and ``key`` arguments and therefore raised TypeError; the credentials
    are now accepted (keyword, default None) and forwarded.
    """
    # Pick the lexicographically-latest entry, assumed to be the newest dump.
    content_dir = os.listdir(backup_dir)
    content_dir.sort()
    latest_dump = content_dir[-1]
    print('restoring from:', latest_dump)
    # The database must be stopped before neo4j-admin can load into it.
    shut_cypher = f"stop database {database};"
    print('shutting down database')
    execute_cypher(shut_cypher, user=user, key=key, silent_mode=False)
    print('done')
    # load data
    load_command = "neo4j-admin load --force --from=" + backup_dir + latest_dump + " --database=" + database
    print("loading through:", load_command)
    check_output(load_command, shell=True)
    # restart the db; administrative commands run against the 'neo4j' db.
    restart_cypher = f"start database {database};"
    print('restarting database')
    execute_cypher(restart_cypher, user=user, key=key, in_db='neo4j', silent_mode=False)
    print("done")
    # NOTE(review): show_databases() does not currently forward credentials
    # itself — confirm/update its signature to match.
    return show_databases()
| 33.295775 | 108 | 0.697547 | import os
from subprocess import check_output
import pandas as pd
from py2neo import Graph, Node, Relationship
import karmahutils as kut
version_info = "v0.1"
version_type = 'moonshade library'
authors = ['Yann Girard']
contact = 'yann.girard@gmail.com'
lib_name = 'MshNeo4j'
purpose = """QoL tools for interacting and maintaining neo4j db."""
def get_graph(key, ip, user, database="validalabdev"):
    """Return a py2neo Graph for ``database``; report (not raise) failures.

    Bug fix: the connection previously hard-coded name="validalabdev",
    ignoring the ``database`` argument.
    """
    try:
        return Graph('bolt://' + ip, auth=(user, key), name=database)
    except Exception as e:
        kut.display_message('can not connect to', database, 'with user', user, 'on ip', ip)
        print(e)
def cypher_command(cypher_string, user, key, in_db=None):
    """Assemble the ``cypher-shell`` command line for ``cypher_string``."""
    db_flag = '' if in_db is None else f' -d {in_db}'
    return f'cypher-shell{db_flag} -u {user} -p {key} "{cypher_string}"'
def execute_cypher(cypher_string, user, key, silent_mode=True, in_db=None):
    """Execute ``cypher_string`` via cypher-shell; return the raw bytes output."""
    full_command = cypher_command(
        cypher_string=cypher_string, user=user, key=key, in_db=in_db
    )
    if not silent_mode:
        print(full_command)
    # NOTE(review): shell=True with interpolated input — trusted use only.
    return check_output(full_command, shell=True)
def show_databases(user=None, key=None):
    """Print and return the database list as a DataFrame.

    Bug fix: the ``execute_cypher`` call lacked the required ``user``/``key``
    arguments (unconditional TypeError); they are now accepted as keywords
    (default None, backward compatible) and forwarded.
    """
    show_database = execute_cypher("show databases;", user=user, key=key)
    show_array = [X.split(',') for X in show_database.decode("unicode_escape").split('\n')]
    db_printing = pd.DataFrame(data=show_array[1:], columns=show_array[0])
    print(db_printing)
    return db_printing
def backup_database(database, backup_dir="/data/backup-data/", user=None, key=None):
    """Restore ``database`` from the latest dump in ``backup_dir`` (stop, load, start).

    Bug fix: both ``execute_cypher`` calls omitted the required ``user``/``key``
    arguments (unconditional TypeError); credentials are now accepted as
    keywords (default None, backward compatible) and forwarded.
    """
    # Latest dump = lexicographically last directory entry.
    content_dir = os.listdir(backup_dir)
    content_dir.sort()
    latest_dump = content_dir[-1]
    print('restoring from:', latest_dump)
    shut_cypher = f"stop database {database};"
    print('shutting down database')
    execute_cypher(shut_cypher, user=user, key=key, silent_mode=False)
    print('done')
    load_command = "neo4j-admin load --force --from=" + backup_dir + latest_dump + " --database=" + database
    print("loading through:", load_command)
    check_output(load_command, shell=True)
    restart_cypher = f"start database {database};"
    print('restarting database')
    execute_cypher(restart_cypher, user=user, key=key, in_db='neo4j', silent_mode=False)
    print("done")
    # NOTE(review): show_databases() does not currently forward credentials
    # itself — confirm/update its signature to match.
    return show_databases()
| true | true |
1c3ce37b8e58e9029de970114e076aa16827be67 | 31,219 | py | Python | eor_limits/plot_eor_limits.py | JulianBMunoz/eor_limits | 780eef1d46862a69e6d249a90a9a230517436cea | [
"BSD-2-Clause"
] | null | null | null | eor_limits/plot_eor_limits.py | JulianBMunoz/eor_limits | 780eef1d46862a69e6d249a90a9a230517436cea | [
"BSD-2-Clause"
] | null | null | null | eor_limits/plot_eor_limits.py | JulianBMunoz/eor_limits | 780eef1d46862a69e6d249a90a9a230517436cea | [
"BSD-2-Clause"
] | null | null | null | #! /usr/bin/env python
# -*- mode: python; coding: utf-8 -*
# Copyright (c) 2019 Nichole Barry, Bryna Hazelton
# Licensed under the 2-clause BSD License
"""Code for plotting EoR Limits."""
import glob
import os
import copy
import yaml
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cmx
import matplotlib.colors as colors
from eor_limits.data import DATA_PATH
# Theory curves drawn by default when ``include_theory`` is True in
# ``make_plot``.  Keys are arbitrary labels; each value is a dict whose
# 'paper' entry selects the processing module and whose remaining entries
# are passed to that module as keyword arguments.
default_theory_params = {
    "munoz_2021_AllGalaxies_z8.5": {
        "paper": "munoz_2021",
        "model": "EOS",
        "redshift": 8.5,
        "linewidth": 3,
    },
    "mesinger_2016_faint_nf0.8": {
        "paper": "mesinger_2016",
        "model": "faint",
        "nf": 0.8,
        "linewidth": 2,
    },
    "mesinger_2016_bright_nf0.8": {
        "paper": "mesinger_2016",
        "model": "bright",
        "nf": 0.8,
        "linewidth": 2,
    },
    "mesinger_2016_faint_nf0.5": {
        "paper": "mesinger_2016",
        "model": "faint",
        "nf": 0.5,
        "linewidth": 3,
    },
    "mesinger_2016_bright_nf0.5": {
        "paper": "mesinger_2016",
        "model": "bright",
        "nf": 0.5,
        "linewidth": 2,
    },
    "pagano_beta1_z8.5": {"paper": "pagano_liu_2020", "beta": 1, "redshift": 8.5},
    "pagano_beta-1_z8.5": {"paper": "pagano_liu_2020", "beta": -1, "redshift": 8.5},
}
def _parse_delta_values(values):
    """Convert a list of delta-squared strings to floats.

    Supports plain numeric strings and 'base**exponent' power notation
    (e.g. '10**3.5'); non-string entries are passed through ``float``.
    """
    parsed = []
    for val in values:
        if isinstance(val, str) and "**" in val:
            base, exponent = val.split("**")
            parsed.append(float(base) ** float(exponent))
        else:
            parsed.append(float(val))
    return parsed


def read_data_yaml(paper_name, theory=False):
    """
    Read in the data from a paper yaml file.

    Parameters
    ----------
    paper_name : str
        Short name of paper (usually author_year) which corresponds to a file
        in the data directory named <paper_name>.yaml
    theory : bool
        Flag that this is a theory paper and so is in the theory folder.

    Returns
    -------
    dict
        Dictionary with the parsed yaml for use in the plotting code.
    """
    if theory:
        file_name = os.path.join(DATA_PATH, "theory", paper_name + ".yaml")
    else:
        file_name = os.path.join(DATA_PATH, paper_name + ".yaml")
    with open(file_name, "r") as pfile:
        paper_dict = yaml.safe_load(pfile)

    # delta_squared values may be given as strings (optionally using '**'
    # power notation); normalize to floats.  Two layouts occur in the data
    # files: a flat list, or a list of lists (one per redshift).
    delta_sq = paper_dict["delta_squared"]
    if isinstance(delta_sq[0], str):
        paper_dict["delta_squared"] = _parse_delta_values(delta_sq)
    elif isinstance(delta_sq[0], list) and isinstance(delta_sq[0][0], str):
        paper_dict["delta_squared"] = [_parse_delta_values(row) for row in delta_sq]
    return paper_dict
def make_plot(
    papers=None,
    include_theory=True,
    theory_legend=True,
    theory_params=None,
    plot_as_points=("patil_2017", "mertens_2020"),
    plot_filename="eor_limits.pdf",
    delta_squared_range=None,
    redshift_range=None,
    k_range=None,
    shade_limits="generational",
    shade_theory="flat",
    colormap="Spectral_r",
    bold_papers=None,
    fontsize=15,
):
    """
    Plot the current EoR Limits as a function of k and redshift.

    Parameters
    ----------
    papers : list of str
        List of papers to include in the plot (specified as 'author_year',
        must be present in the data folder).
        Defaults to `None` meaning include all papers in the data folder.
    include_theory : bool
        Flag to include theory lines on plots.
    theory_params : dict
        Dictionary specifying theory lines to include on the plot. Dictionary
        parameters depend on the theory paper. E.g. for lines from Mesinger et al. 2016,
        the options are 'model' which can be 'bright' or 'faint', 'nf' which specifies
        a neutral fraction and 'redshift'. See the paper specific modules for more
        examples. Only used if `include_theory` is True. Defaults to
        `default_theory_params` when None.
    theory_legend : bool
        Option to exclude theory lines from the legend. Used by some users who prefer
        to add the annotations on the lines by hand to improve readability.
    plot_as_points : list or tuple of str
        Papers that have a line type data model to be plotted as points rather
        than a line.
    delta_squared_range : list of float
        Range of delta squared values to include in plot (yaxis range). Must be
        length 2 with second element greater than first element. Defaults to [1e3, 1e6]
        if include_theory is False and [1e0, 1e6] otherwise.
    redshift_range : list of float
        Range of redshifts to include in the plot. Must be length 2 with the second
        element greater than the first element.
    k_range : list of float
        Range of ks to include in the plot. Must be length 2 with the second element
        greater than the first element.
    shade_limits : {'generational', 'alpha', False}
        How to shade above plotted limits. 'generational' shading shades dark grey for
        all generation 1 papers and light grey for later generation papers. 'alpha'
        shading shades all papers with semi-transparent grey. Setting this to False
        results in no shading.
    shade_theory : {'flat', 'alpha', False}
        How to shade below theory lines. 'flat' shading shades light grey below all
        theory lines. 'alpha' shading shades below all theory lines with
        semi-transparent grey. Setting this to False results in no shading.
    colormap : str
        Matplotlib colormap to use for redshift.
    plot_filename : str
        File name to save plot to.
    bold_papers : list of str
        List of papers to bold in caption.
    fontsize : float
        Base font size for labels and legend; also drives legend layout.

    """
    # Avoid a module-level dict as a (mutable) default argument.
    if theory_params is None:
        theory_params = default_theory_params
    if papers is None:
        # use all the papers. This gives weird ordering which we will fix later
        papers_sorted = False
        papers = [
            os.path.splitext(os.path.basename(p))[0]
            for p in glob.glob(os.path.join(DATA_PATH, "*.yaml"))
        ]
    else:
        # if a list is passed in by hand, don't reorder it
        papers_sorted = True
    if delta_squared_range is None:
        # theory curves sit well below measured limits, so open the y range up.
        if include_theory:
            delta_squared_range = [1e0, 1e6]
        else:
            delta_squared_range = [1e3, 1e6]
    if bold_papers is None:
        bold_papers = []
    # First-generation experiments, shaded differently when
    # shade_limits == 'generational'.
    generation1 = [
        "paciga_2013",
        "dillon_2014",
        "dillon_2015",
        "beardsley_2016",
        "patil_2017",
        "kolopanis_2019",
    ]
    paper_list = []
    for paper_name in papers:
        paper_dict = read_data_yaml(paper_name)
        if paper_name in bold_papers:
            paper_dict["bold"] = True
        else:
            paper_dict["bold"] = False
        if paper_name in plot_as_points:
            paper_dict["plot_as_point"] = True
        else:
            paper_dict["plot_as_point"] = False
        if paper_name in generation1:
            paper_dict["generation1"] = True
        else:
            paper_dict["generation1"] = False
        paper_list.append(paper_dict)
    if not papers_sorted:
        paper_list.sort(key=lambda p: p["year"])
    if include_theory:
        theory_paper_list = []
        for name, theory in theory_params.items():
            # Theory data either lives as a yaml in data/theory or is built
            # by a paper-specific processing module (imported lazily).
            theory_paper_yamls = [
                os.path.splitext(os.path.basename(p))[0]
                for p in glob.glob(os.path.join(DATA_PATH, "theory", "*.yaml"))
            ]
            if theory["paper"] in theory_paper_yamls:
                paper_dict = read_data_yaml(theory["paper"], theory=True)
            elif theory["paper"] == "mesinger_2016":
                from eor_limits.process_mesinger_2016 import get_mesinger_2016_line

                dict_use = copy.deepcopy(theory)
                dict_use.pop("paper")
                paper_dict = get_mesinger_2016_line(**dict_use)
            elif theory["paper"] == "pagano_liu_2020":
                from eor_limits.process_pagano_2020 import get_pagano_2020_line

                dict_use = copy.deepcopy(theory)
                dict_use.pop("paper")
                paper_dict = get_pagano_2020_line(**dict_use)
            elif theory["paper"] == "munoz_2021":
                from eor_limits.process_munoz_2021 import get_munoz_2021_line

                dict_use = copy.deepcopy(theory)
                dict_use.pop("paper")
                paper_dict = get_munoz_2021_line(**dict_use)
            else:
                raise ValueError(
                    "Theory paper " + theory["paper"] + " is not a yaml in the "
                    "data/theory folder and is not a paper with a known processing "
                    "module."
                )
            theory_paper_list.append(paper_dict)
    if redshift_range is not None:
        if len(redshift_range) != 2:
            raise ValueError(
                "redshift range must have 2 elements with the second element greater "
                "than the first element."
            )
        if redshift_range[0] >= redshift_range[1]:
            raise ValueError(
                "redshift range must have 2 elements with the second element greater "
                "than the first element."
            )
        norm = colors.Normalize(vmin=redshift_range[0], vmax=redshift_range[1])
    else:
        # No explicit range: collect the redshifts of every datum that will
        # actually appear on the plot and normalize the colormap to them.
        redshift_list = []
        for paper in paper_list:
            if paper["type"] == "point":
                delta_array = np.array(paper["delta_squared"])
                paper_redshifts = np.array(paper["redshift"])
                if paper_redshifts.size == 1 and delta_array.size > 1:
                    paper_redshifts = np.repeat(paper_redshifts[0], delta_array.size)
                if k_range is not None:
                    k_vals = np.asarray(paper["k"])
                    inds_use = np.nonzero(
                        (delta_array <= delta_squared_range[1])
                        & (k_vals <= k_range[1])
                        & (k_vals >= k_range[0])
                    )[0]
                else:
                    inds_use = np.nonzero(delta_array <= delta_squared_range[1])[0]
                if len(paper["redshift"]) == 1 and inds_use.size > 0:
                    inds_use = np.asarray([0])
                redshift_list += list(paper_redshifts[inds_use])
            else:
                if not isinstance(paper["k"][0], list):
                    redshifts = [paper["redshift"][0]]
                    k_vals = [paper["k"]]
                    delta_squared = [paper["delta_squared"]]
                else:
                    redshifts = list(np.squeeze(paper["redshift"]))
                    k_vals = paper["k"]
                    delta_squared = paper["delta_squared"]
                for ind, elem in enumerate(redshifts):
                    delta_array = np.asarray(delta_squared[ind])
                    if k_range is not None:
                        k_array = np.asarray(k_vals[ind])
                        if np.nanmin(delta_array) <= delta_squared_range[1] or (
                            np.min(k_array) <= k_range[1]
                            and np.max(k_array) >= k_range[0]
                        ):
                            redshift_list.append(elem)
                    else:
                        if np.nanmin(delta_array) <= delta_squared_range[1]:
                            redshift_list.append(elem)
        redshift_list = sorted(set(redshift_list))
        if np.min(redshift_list) < np.max(redshift_list):
            redshift_range_use = [redshift_list[0], redshift_list[-1]]
        else:
            # if only 1 redshift and no range specified, use a range of 2 centered on
            # redshift of data.
            redshift_range_use = [redshift_list[0] - 1, redshift_list[0] + 1]
        norm = colors.Normalize(vmin=redshift_range_use[0], vmax=redshift_range_use[1])
    scalar_map = cmx.ScalarMappable(norm=norm, cmap=colormap)
    if include_theory:
        fig_height = 20
    else:
        fig_height = 10
    fig_width = 20
    fig = plt.figure(figsize=(fig_width, fig_height))
    legend_names = []
    lines = []
    paper_ks = []
    skipped_papers = []
    for paper_i, paper in enumerate(paper_list):
        # Build a mathtext label: "<telescope> (<author>, <year>)", bolded
        # for papers listed in bold_papers.
        if paper["bold"]:
            label_start = " $\\bf{"
        else:
            label_start = " $\\rm{"
        label_end = "}$"
        label = (
            label_start
            + r"\ ".join(paper["telescope"].split(" "))
            + r"\ ("
            + paper["author"]
            + r",\ "
            + str(paper["year"])
            + ")"
            + label_end
        )
        if paper["type"] == "point":
            if len(paper["redshift"]) == 1 and len(paper["delta_squared"]) > 1:
                paper["redshift"] = paper["redshift"] * len(paper["delta_squared"])
            elif len(paper["redshift"]) != len(paper["delta_squared"]):
                raise ValueError(f"{label} has the wrong number of redshift values.")
            delta_squared = np.asarray(paper["delta_squared"])
            if redshift_range is not None:
                redshift_array = np.asarray(paper["redshift"])
                points_use = np.where(
                    (redshift_array >= redshift_range[0])
                    & (redshift_array <= redshift_range[1])
                    & (delta_squared >= delta_squared_range[0])
                    & (delta_squared <= delta_squared_range[1])
                )[0]
            else:
                points_use = np.where(
                    (delta_squared >= delta_squared_range[0])
                    & (delta_squared <= delta_squared_range[1])
                )[0]
            if points_use.size == 0:
                skipped_papers.append(paper)
                continue
            else:
                paper_ks.extend(list(np.asarray(paper["k"])[points_use]))
                delta_squared = np.asarray(paper["delta_squared"])[points_use]
                line = plt.scatter(
                    np.asarray(paper["k"])[points_use],
                    delta_squared,
                    marker=paper["marker"],
                    c=np.asarray(paper["redshift"])[points_use].tolist(),
                    cmap=colormap,
                    norm=norm,
                    edgecolors="black",
                    label=label,
                    s=150,
                    zorder=10,
                )
                if shade_limits is not False:
                    if shade_limits == "generational":
                        if paper["generation1"]:
                            color_use = "grey"
                            zorder = 1
                            alpha = 1
                        else:
                            color_use = "lightgrey"
                            zorder = 0
                            alpha = 1
                    else:
                        color_use = "grey"
                        zorder = 0
                        alpha = 0.5
                    # Shade from each limit up to the top of the plot, across
                    # the k extent of the measurement.
                    for index in points_use:
                        k_edges = [paper["k_lower"][index], paper["k_upper"][index]]
                        delta_edges = [
                            paper["delta_squared"][index],
                            paper["delta_squared"][index],
                        ]
                        plt.fill_between(
                            k_edges,
                            delta_edges,
                            delta_squared_range[1],
                            color=color_use,
                            alpha=alpha,
                            zorder=zorder,
                        )
                lines.append(line)
        else:
            if not isinstance(paper["k"][0], list):
                redshifts = [paper["redshift"][0]]
                k_vals = [paper["k"]]
                k_lower = [paper["k_lower"]]
                k_upper = [paper["k_upper"]]
                delta_squared = [paper["delta_squared"]]
            else:
                redshifts = list(np.squeeze(paper["redshift"]))
                k_vals = paper["k"]
                k_lower = paper["k_lower"]
                k_upper = paper["k_upper"]
                delta_squared = paper["delta_squared"]
            if redshift_range is not None:
                redshift_array = np.asarray(redshifts)
                lines_use = np.where(
                    (redshift_array >= redshift_range[0])
                    & (redshift_array <= redshift_range[1])
                )[0]
                if lines_use.size == 0:
                    skipped_papers.append(paper)
                    continue
            else:
                lines_use = np.arange(len(redshifts))
            for ind, redshift in enumerate(np.asarray(redshifts)[lines_use]):
                paper_ks.extend(k_vals[ind])
                # Interleave lower/upper k edges to draw a stairstep line.
                k_edges = np.stack(
                    (np.asarray(k_lower[ind]), np.asarray(k_upper[ind]))
                ).T.flatten()
                delta_edges = np.stack(
                    (np.asarray(delta_squared[ind]), np.asarray(delta_squared[ind]))
                ).T.flatten()
                if paper["plot_as_point"]:
                    line = plt.scatter(
                        k_vals[ind],
                        delta_squared[ind],
                        marker=paper["marker"],
                        c=np.zeros(len(k_vals[ind])) + redshift,
                        cmap=colormap,
                        norm=norm,
                        edgecolors="black",
                        label=label,
                        s=150,
                        zorder=10,
                    )
                else:
                    color_val = scalar_map.to_rgba(redshift)
                    # make black outline by plotting thicker black line first
                    plt.plot(
                        k_edges,
                        delta_edges,
                        c="black",
                        linewidth=paper["linewidth"] + 2,
                        zorder=2,
                    )
                    (line,) = plt.plot(
                        k_edges,
                        delta_edges,
                        c=color_val,
                        linewidth=paper["linewidth"],
                        label=label,
                        zorder=2,
                    )
                if shade_limits is not False:
                    if shade_limits == "generational":
                        if paper["generation1"]:
                            color_use = "grey"
                            zorder = 1
                            alpha = 1
                        else:
                            color_use = "lightgrey"
                            zorder = 0
                            alpha = 1
                    else:
                        color_use = "grey"
                        zorder = 0
                        alpha = 0.5
                    plt.fill_between(
                        k_edges,
                        delta_edges,
                        delta_squared_range[1],
                        color=color_use,
                        alpha=alpha,
                        zorder=zorder,
                    )
                # Only the first redshift line of a paper goes in the legend.
                if ind == 0:
                    lines.append(line)
        legend_names.append(label)
    if len(skipped_papers) == len(paper_list):
        raise ValueError("No papers in specified redshift and/or delta squared range.")
    theory_line_inds = []
    if include_theory:
        # we want to supress legend labels for theories with linewidth=0
        # which are only used for shading
        # fix ordering to put them at the end
        linewidths = np.asarray([paper["linewidth"] for paper in theory_paper_list])
        ordering = np.argsort(linewidths == 0)
        theory_paper_list = [theory_paper_list[p] for p in ordering]
        for paper in theory_paper_list:
            label_start = " $\\bf{Theory:} \\rm{ "
            label_end = "}$"
            label = (
                label_start
                + r"\ ".join(paper["model"].split(" "))
                + r"\ ("
                + r"\ ".join(paper["author"].split(" "))
                + r",\ "
                + str(paper["year"])
                + ")"
                + label_end
            )
            k_vals = paper["k"]
            delta_squared = paper["delta_squared"]
            (line,) = plt.plot(
                k_vals,
                delta_squared,
                c="lightsteelblue",
                linewidth=paper["linewidth"],
                linestyle=paper["linestyle"],
                zorder=2,
            )
            if shade_theory is not False:
                if shade_theory == "flat":
                    color_use = "aliceblue"
                    zorder = 0
                    alpha = 1
                else:
                    color_use = "lightsteelblue"
                    zorder = 0
                    alpha = 1.0 / len(theory_paper_list)
                plt.fill_between(
                    k_vals,
                    delta_squared,
                    delta_squared_range[0],
                    color=color_use,
                    alpha=alpha,
                    zorder=zorder,
                )
            theory_line_inds.append(len(lines))
            lines.append(line)
            if paper["linewidth"] > 0 and theory_legend:
                legend_names.append(label)
    # Legend layout is computed in figure-normalized units from the font size.
    point_size = 1 / 72.0  # typography standard (points/inch)
    font_inch = fontsize * point_size
    plt.rcParams.update({"font.size": fontsize})
    plt.xlabel("k ($h Mpc^{-1}$)", fontsize=fontsize)
    # Raw string: "\D" is not a valid escape sequence; the rendered text is
    # unchanged but the DeprecationWarning goes away.
    plt.ylabel(r"$\Delta^2$ ($mK^2$)", fontsize=fontsize)
    plt.yscale("log")
    plt.xscale("log")
    plt.ylim(*delta_squared_range)
    if k_range is None:
        # Round the data-driven k range outward to "nice" powers of ten.
        k_range = [np.min(paper_ks), np.max(paper_ks)]
        min_factor = 10 ** np.ceil(np.log10(k_range[0]) * -1)
        max_factor = 10 ** np.ceil(np.log10(k_range[1]) * -1)
        k_range = [
            np.floor(k_range[0] * min_factor) / min_factor,
            np.ceil(k_range[1] * max_factor) / max_factor,
        ]
    plt.xlim(*k_range)
    plt.tick_params(labelsize=fontsize)
    cb = plt.colorbar(scalar_map, fraction=0.1, pad=0.08, label="Redshift")
    cb.ax.yaxis.set_label_position("left")
    cb.ax.yaxis.set_ticks_position("left")
    cb.set_label(label="Redshift", fontsize=fontsize)
    plt.grid(axis="y")
    if fontsize > 20:
        leg_columns = 2
    else:
        leg_columns = 3
    leg_rows = int(np.ceil(len(legend_names) / leg_columns))
    legend_height = (2 * leg_rows) * font_inch
    legend_height_norm = legend_height / fig_height  # 0.25
    axis_height = 3 * fontsize * point_size
    axis_height_norm = axis_height / fig_height
    plot_bottom = legend_height_norm + axis_height_norm
    leg = plt.legend(
        lines,
        legend_names,
        bbox_to_anchor=(0.45, legend_height_norm / 2.0),
        loc="center",
        bbox_transform=fig.transFigure,
        ncol=leg_columns,
        frameon=False,
    )
    # Grey out data-paper legend handles so only the markers/lines carry
    # the redshift color.  NOTE(review): Legend.legendHandles is deprecated
    # in recent matplotlib (renamed legend_handles) — confirm the pinned
    # matplotlib version before changing.
    for ind in range(len(leg.legendHandles)):
        if ind not in theory_line_inds:
            leg.legendHandles[ind].set_color("gray")
    plt.subplots_adjust(bottom=plot_bottom)
    fig.tight_layout()
    plt.savefig(plot_filename)
if __name__ == "__main__":
    import argparse
    # Command-line front end for make_plot(): every CLI option maps onto one
    # of its keyword arguments.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--papers",
        type=str,
        nargs="+",
        default=None,
        help="Papers to include on plot "
        "(must be in data directory). Defaults to all papers "
        "in the data directory.",
    )
    parser.add_argument(
        "--no_theory",
        action="store_true",
        help="Flag to not plot theory lines. If True, default range is modified.",
    )
    parser.add_argument(
        "--theories",
        type=str,
        nargs="+",
        default=None,
        help="Theories to plot. Theory-specific options can be set to control which "
        "lines are drawn.",
    )
    parser.add_argument(
        "--theory_model",
        nargs="+",
        type=str,
        default=None,
        help="Model type to select from theories (e.g. 'bright' or 'faint' for "
        "Mesinger et al. 2016).",
    )
    parser.add_argument(
        "--theory_nf",
        nargs="+",
        type=str,
        default=None,
        help="Neutral fractions to select from theories.",
    )
    parser.add_argument(
        "--theory_redshift",
        nargs="+",
        type=str,
        default=None,
        help="Redshifts to select from theories.",
    )
    parser.add_argument(
        "--theory_linewidth",
        nargs="+",
        type=float,
        default=None,
        help="Linewidths for theory lines.",
    )
    parser.add_argument(
        "--file",
        type=str,
        dest="filename",
        help="Filename to save plot to.",
        default="eor_limits.pdf",
    )
    parser.add_argument(
        "--aspoints",
        type=str,
        nargs="+",
        default=["patil_2017", "mertens_2020"],
        help="Papers to plot as points rather than lines.",
    )
    parser.add_argument(
        "--range",
        type=float,
        help="Range of Delta Squared to include on plot (yaxis range). "
        "Defaults to [1e3, 1e6] if include_theory is false and [1e0, 1e6] otherwise",
        default=None,
        nargs="+",
    )
    parser.add_argument(
        "--redshift",
        type=float,
        help="Range of redshifts to include on plot.",
        default=None,
        nargs="+",
    )
    parser.add_argument(
        "--k_range",
        type=float,
        help="Range of k values to include on plot (xaxis range).",
        default=None,
        nargs="+",
    )
    parser.add_argument(
        "--shade_limits",
        type=str,
        default="generational",
        help="Type of shading above limits to apply, one of: 'generational', 'alpha' "
        "or False.",
    )
    parser.add_argument(
        "--shade_theory",
        type=str,
        default="flat",
        help="Type of shading below theories to apply, one of: 'flat', 'alpha' "
        "or False.",
    )
    parser.add_argument(
        "--colormap", type=str, help="Matplotlib colormap to use.", default="Spectral_r"
    )
    parser.add_argument(
        "--bold",
        type=str,
        nargs="+",
        help="List of papers to bold in caption.",
        default=None,
    )
    parser.add_argument("--fontsize", type=int, help="Font size to use.", default=15)
    args = parser.parse_args()
    # argparse delivers strings; map the literal "False" onto the boolean
    # False that make_plot() expects for "no shading".
    if args.shade_limits == "False":
        args.shade_limits = False
    if args.shade_theory == "False":
        args.shade_theory = False
    if args.theories is not None:
        # Normalize each theory option to a list; "None" strings become None.
        if args.theory_nf is None:
            args.theory_nf = [None]
        else:
            args.theory_nf = [
                float(val) if val != "None" else None for val in args.theory_nf
            ]
        if args.theory_redshift is None:
            args.theory_redshift = [None]
        if args.theory_model is None:
            args.theory_model = [None]
        theory_params = {}
        num_theories = len(args.theories)
        num_models = len(args.theory_model)
        num_nf = len(args.theory_nf)
        num_redshift = len(args.theory_redshift)
        # Each theory line needs one value per option; broadcast any length-1
        # option list up to the max, otherwise lengths must match exactly.
        num_theory_lines = max([num_theories, num_models, num_nf, num_redshift])
        if num_theory_lines > 1:
            if num_theories == 1:
                args.theories = args.theories * num_theory_lines
            elif num_theories != num_theory_lines:
                raise ValueError(
                    "Number of theories must be one or match the max length of "
                    "theory_model, theory_nf or theory_redshift."
                )
            if num_models == 1:
                args.theory_model = args.theory_model * num_theory_lines
            elif num_models != num_theory_lines:
                raise ValueError(
                    "Number of theory_models must be one or match the max length of "
                    "theories, theory_nf or theory_redshift."
                )
            if num_nf == 1:
                args.theory_nf = args.theory_nf * num_theory_lines
            elif num_nf != num_theory_lines:
                raise ValueError(
                    "Number of theory_nfs must be one or match the max length of "
                    "theories, theory_model or theory_redshift."
                )
            if num_redshift == 1:
                args.theory_redshift = args.theory_redshift * num_theory_lines
            elif num_redshift != num_theory_lines:
                raise ValueError(
                    "Number of theory_redshifts must be one or match the max length of "
                    "theories, theory_model or theory_nf."
                )
            if args.theory_linewidth is not None:
                if len(args.theory_linewidth) == 1:
                    args.theory_linewidth = args.theory_linewidth * num_theory_lines
                elif len(args.theory_linewidth) != num_theory_lines:
                    raise ValueError(
                        "Number of theory lines must be one or match the max length of "
                        "theories, theory_model, theory_nf or theory_redshift."
                    )
        # Build one theory_params entry per requested line, keyed by a
        # unique label assembled from the selection options.
        for index, theory in enumerate(args.theories):
            name = (
                theory
                + "_"
                + str(args.theory_model[index])
                + "_nf_"
                + str(args.theory_nf[index])
                + "_z_"
                + str(args.theory_redshift[index])
            )
            theory_params[name] = {
                "paper": theory,
                "model": args.theory_model[index],
                "nf": args.theory_nf[index],
                "redshift": args.theory_redshift[index],
            }
            if args.theory_linewidth is not None:
                theory_params[name]["linewidth"] = args.theory_linewidth[index]
    else:
        theory_params = default_theory_params
    make_plot(
        papers=args.papers,
        include_theory=not args.no_theory,
        theory_params=theory_params,
        plot_as_points=args.aspoints,
        delta_squared_range=args.range,
        redshift_range=args.redshift,
        k_range=args.k_range,
        shade_limits=args.shade_limits,
        shade_theory=args.shade_theory,
        colormap=args.colormap,
        plot_filename=args.filename,
        bold_papers=args.bold,
        fontsize=args.fontsize,
    )
| 36.685076 | 88 | 0.522246 |
import glob
import os
import copy
import yaml
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cmx
import matplotlib.colors as colors
from eor_limits.data import DATA_PATH
# Default set of theory lines drawn when the caller does not supply
# theory_params: each key is a unique label and each value selects a
# processing module ("paper") plus model / neutral-fraction ("nf") /
# redshift options and an optional linewidth.
default_theory_params = {
    "munoz_2021_AllGalaxies_z8.5": {
        "paper": "munoz_2021",
        "model": "EOS",
        "redshift": 8.5,
        "linewidth": 3,
    },
    "mesinger_2016_faint_nf0.8": {
        "paper": "mesinger_2016",
        "model": "faint",
        "nf": 0.8,
        "linewidth": 2,
    },
    "mesinger_2016_bright_nf0.8": {
        "paper": "mesinger_2016",
        "model": "bright",
        "nf": 0.8,
        "linewidth": 2,
    },
    "mesinger_2016_faint_nf0.5": {
        "paper": "mesinger_2016",
        "model": "faint",
        "nf": 0.5,
        "linewidth": 3,
    },
    "mesinger_2016_bright_nf0.5": {
        "paper": "mesinger_2016",
        "model": "bright",
        "nf": 0.5,
        "linewidth": 2,
    },
    "pagano_beta1_z8.5": {"paper": "pagano_liu_2020", "beta": 1, "redshift": 8.5},
    "pagano_beta-1_z8.5": {"paper": "pagano_liu_2020", "beta": -1, "redshift": 8.5},
}
def read_data_yaml(paper_name, theory=False):
if theory:
file_name = os.path.join(DATA_PATH, "theory", paper_name + ".yaml")
else:
file_name = os.path.join(DATA_PATH, paper_name + ".yaml")
with open(file_name, "r") as pfile:
paper_dict = yaml.safe_load(pfile)
if isinstance(paper_dict["delta_squared"][0], (str,)):
try:
paper_dict["delta_squared"] = [
float(val) for val in paper_dict["delta_squared"]
]
except (ValueError):
val_list = []
for val in paper_dict["delta_squared"]:
if "**" in val:
val_split = val.split("**")
val_list.append(float(val_split[0]) ** float(val_split[1]))
else:
val_list.append(float(val))
paper_dict["delta_squared"] = val_list
elif isinstance(paper_dict["delta_squared"][0], (list,)) and isinstance(
paper_dict["delta_squared"][0][0], (str,)
):
for ind, elem in enumerate(paper_dict["delta_squared"]):
try:
paper_dict["delta_squared"][ind] = [float(val) for val in elem]
except (ValueError):
val_list = []
for val in paper_dict["delta_squared"][ind]:
if "**" in val:
val_split = val.split("**")
val_list.append(float(val_split[0]) ** float(val_split[1]))
else:
val_list.append(float(val))
paper_dict["delta_squared"][ind] = val_list
return paper_dict
def make_plot(
    papers=None,
    include_theory=True,
    theory_legend=True,
    theory_params=default_theory_params,
    plot_as_points=["patil_2017", "mertens_2020"],  # NOTE(review): mutable default, shared across calls
    plot_filename="eor_limits.pdf",
    delta_squared_range=None,
    redshift_range=None,
    k_range=None,
    shade_limits="generational",
    shade_theory="flat",
    colormap="Spectral_r",
    bold_papers=None,
    fontsize=15,
):
    """Plot published EoR power-spectrum limits and optional theory lines.

    Parameters
    ----------
    papers : list of str, optional
        Paper yaml basenames to plot; defaults to every yaml in DATA_PATH.
        When defaulted, papers are sorted by publication year.
    include_theory : bool
        Whether to draw theory lines selected by ``theory_params``.
    theory_legend : bool
        Whether theory lines with nonzero linewidth appear in the legend.
    theory_params : dict
        Mapping of label -> theory selection options (``paper`` plus
        model/nf/redshift selectors and optional ``linewidth``).
    plot_as_points : list of str
        Papers drawn as scatter points rather than stairstep lines.
    plot_filename : str
        Output path passed to ``plt.savefig``.
    delta_squared_range : list of float, optional
        y-axis range; defaults to [1e0, 1e6] with theory, [1e3, 1e6] without.
    redshift_range : list of float, optional
        Two increasing values restricting which measurements are shown.
    k_range : list of float, optional
        x-axis range; derived from the plotted k values when omitted.
    shade_limits : str or False
        'generational', 'alpha' or False — shading style above limits.
    shade_theory : str or False
        'flat', 'alpha' or False — shading style below theory lines.
    colormap : str
        Matplotlib colormap name used to encode redshift.
    bold_papers : list of str, optional
        Papers whose legend labels are set in bold.
    fontsize : int
        Base font size, also used to size the legend area.

    Raises
    ------
    ValueError
        If ``redshift_range`` is malformed, a paper has inconsistent
        redshift counts, no papers fall in range, or a theory paper is
        unknown.
    """
    # --- assemble the list of measurement papers and their display flags ---
    if papers is None:
        papers_sorted = False
        papers = [
            os.path.splitext(os.path.basename(p))[0]
            for p in glob.glob(os.path.join(DATA_PATH, "*.yaml"))
        ]
    else:
        papers_sorted = True
    if delta_squared_range is None:
        if include_theory:
            delta_squared_range = [1e0, 1e6]
        else:
            delta_squared_range = [1e3, 1e6]
    if bold_papers is None:
        bold_papers = []
    # First-generation experiments get darker shading under "generational".
    generation1 = [
        "paciga_2013",
        "dillon_2014",
        "dillon_2015",
        "beardsley_2016",
        "patil_2017",
        "kolopanis_2019",
    ]
    paper_list = []
    for paper_name in papers:
        paper_dict = read_data_yaml(paper_name)
        if paper_name in bold_papers:
            paper_dict["bold"] = True
        else:
            paper_dict["bold"] = False
        if paper_name in plot_as_points:
            paper_dict["plot_as_point"] = True
        else:
            paper_dict["plot_as_point"] = False
        if paper_name in generation1:
            paper_dict["generation1"] = True
        else:
            paper_dict["generation1"] = False
        paper_list.append(paper_dict)
    if not papers_sorted:
        paper_list.sort(key=lambda paper_list: paper_list["year"])
    # --- load theory lines: either from a yaml or a processing module ---
    if include_theory:
        theory_paper_list = []
        for name, theory in theory_params.items():
            theory_paper_yamls = [
                os.path.splitext(os.path.basename(p))[0]
                for p in glob.glob(os.path.join(DATA_PATH, "theory", "*.yaml"))
            ]
            if theory["paper"] in theory_paper_yamls:
                paper_dict = read_data_yaml(theory["paper"], theory=True)
            elif theory["paper"] == "mesinger_2016":
                from eor_limits.process_mesinger_2016 import get_mesinger_2016_line
                # deepcopy so popping "paper" does not mutate the caller's dict
                dict_use = copy.deepcopy(theory)
                dict_use.pop("paper")
                paper_dict = get_mesinger_2016_line(**dict_use)
            elif theory["paper"] == "pagano_liu_2020":
                from eor_limits.process_pagano_2020 import get_pagano_2020_line
                dict_use = copy.deepcopy(theory)
                dict_use.pop("paper")
                paper_dict = get_pagano_2020_line(**dict_use)
            elif theory["paper"] == "munoz_2021":
                from eor_limits.process_munoz_2021 import get_munoz_2021_line
                dict_use = copy.deepcopy(theory)
                dict_use.pop("paper")
                paper_dict = get_munoz_2021_line(**dict_use)
            else:
                raise ValueError(
                    "Theory paper " + theory["paper"] + " is not a yaml in the "
                    "data/theory folder and is not a paper with a known processing "
                    "module."
                )
            theory_paper_list.append(paper_dict)
    # --- establish the redshift color normalization ---
    if redshift_range is not None:
        if len(redshift_range) != 2:
            raise ValueError(
                "redshift range must have 2 elements with the second element greater "
                "than the first element."
            )
        if redshift_range[0] >= redshift_range[1]:
            raise ValueError(
                "redshift range must have 2 elements with the second element greater "
                "than the first element."
            )
        norm = colors.Normalize(vmin=redshift_range[0], vmax=redshift_range[1])
    else:
        # No explicit range: scan all papers for redshifts that will actually
        # appear on the plot (within the delta^2 and k ranges).
        redshift_list = []
        for paper in paper_list:
            if paper["type"] == "point":
                delta_array = np.array(paper["delta_squared"])
                paper_redshifts = np.array(paper["redshift"])
                if paper_redshifts.size == 1 and delta_array.size > 1:
                    paper_redshifts = np.repeat(paper_redshifts[0], delta_array.size)
                if k_range is not None:
                    k_vals = np.asarray(paper["k"])
                    inds_use = np.nonzero(
                        (delta_array <= delta_squared_range[1])
                        & (k_vals <= k_range[1])
                        & (k_vals >= k_range[0])
                    )[0]
                else:
                    inds_use = np.nonzero(delta_array <= delta_squared_range[1])[0]
                if len(paper["redshift"]) == 1 and inds_use.size > 0:
                    inds_use = np.asarray([0])
                redshift_list += list(paper_redshifts[inds_use])
            else:
                if not isinstance(paper["k"][0], list):
                    redshifts = [paper["redshift"][0]]
                    k_vals = [paper["k"]]
                    delta_squared = [paper["delta_squared"]]
                else:
                    redshifts = list(np.squeeze(paper["redshift"]))
                    k_vals = paper["k"]
                    delta_squared = paper["delta_squared"]
                for ind, elem in enumerate(redshifts):
                    delta_array = np.asarray(delta_squared[ind])
                    if k_range is not None:
                        k_array = np.asarray(k_vals[ind])
                        if np.nanmin(delta_array) <= delta_squared_range[1] or (
                            np.min(k_array) <= k_range[1]
                            and np.max(k_array) >= k_range[0]
                        ):
                            redshift_list.append(elem)
                    else:
                        if np.nanmin(delta_array) <= delta_squared_range[1]:
                            redshift_list.append(elem)
        redshift_list = sorted(set(redshift_list))
        if np.min(redshift_list) < np.max(redshift_list):
            redshift_range_use = [redshift_list[0], redshift_list[-1]]
        else:
            # if only 1 redshift and no range specified, use a range of 2 centered on
            # redshift of data.
            redshift_range_use = [redshift_list[0] - 1, redshift_list[0] + 1]
        norm = colors.Normalize(vmin=redshift_range_use[0], vmax=redshift_range_use[1])
    scalar_map = cmx.ScalarMappable(norm=norm, cmap=colormap)
    if include_theory:
        fig_height = 20
    else:
        fig_height = 10
    fig_width = 20
    fig = plt.figure(figsize=(fig_width, fig_height))
    legend_names = []
    lines = []
    paper_ks = []
    skipped_papers = []
    # --- draw each measurement paper, either as points or as step lines ---
    for paper_i, paper in enumerate(paper_list):
        if paper["bold"]:
            label_start = " $\\bf{"
        else:
            label_start = " $\\rm{"
        label_end = "}$"
        label = (
            label_start
            + r"\ ".join(paper["telescope"].split(" "))
            + r"\ ("
            + paper["author"]
            + r",\ "
            + str(paper["year"])
            + ")"
            + label_end
        )
        if paper["type"] == "point":
            if len(paper["redshift"]) == 1 and len(paper["delta_squared"]) > 1:
                paper["redshift"] = paper["redshift"] * len(paper["delta_squared"])
            elif len(paper["redshift"]) != len(paper["delta_squared"]):
                raise ValueError(f"{label} has the wrong number of redshift values.")
            delta_squared = np.asarray(paper["delta_squared"])
            if redshift_range is not None:
                redshift_array = np.asarray(paper["redshift"])
                points_use = np.where(
                    (redshift_array >= redshift_range[0])
                    & (redshift_array <= redshift_range[1])
                    & (delta_squared >= delta_squared_range[0])
                    & (delta_squared <= delta_squared_range[1])
                )[0]
            else:
                points_use = np.where(
                    (delta_squared >= delta_squared_range[0])
                    & (delta_squared <= delta_squared_range[1])
                )[0]
            if points_use.size == 0:
                skipped_papers.append(paper)
                continue
            else:
                paper_ks.extend(list(np.asarray(paper["k"])[points_use]))
                delta_squared = np.asarray(paper["delta_squared"])[points_use]
                line = plt.scatter(
                    np.asarray(paper["k"])[points_use],
                    delta_squared,
                    marker=paper["marker"],
                    c=np.asarray(paper["redshift"])[points_use].tolist(),
                    cmap=colormap,
                    norm=norm,
                    edgecolors="black",
                    label=label,
                    s=150,
                    zorder=10,
                )
                if shade_limits is not False:
                    if shade_limits == "generational":
                        if paper["generation1"]:
                            color_use = "grey"
                            zorder = 1
                            alpha = 1
                        else:
                            color_use = "lightgrey"
                            zorder = 0
                            alpha = 1
                    else:
                        color_use = "grey"
                        zorder = 0
                        alpha = 0.5
                    # Shade the excluded region above each limit point.
                    for index in points_use:
                        k_edges = [paper["k_lower"][index], paper["k_upper"][index]]
                        delta_edges = [
                            paper["delta_squared"][index],
                            paper["delta_squared"][index],
                        ]
                        plt.fill_between(
                            k_edges,
                            delta_edges,
                            delta_squared_range[1],
                            color=color_use,
                            alpha=alpha,
                            zorder=zorder,
                        )
                lines.append(line)
        else:
            if not isinstance(paper["k"][0], list):
                redshifts = [paper["redshift"][0]]
                k_vals = [paper["k"]]
                k_lower = [paper["k_lower"]]
                k_upper = [paper["k_upper"]]
                delta_squared = [paper["delta_squared"]]
            else:
                redshifts = list(np.squeeze(paper["redshift"]))
                k_vals = paper["k"]
                k_lower = paper["k_lower"]
                k_upper = paper["k_upper"]
                delta_squared = paper["delta_squared"]
            if redshift_range is not None:
                redshift_array = np.asarray(redshifts)
                lines_use = np.where(
                    (redshift_array >= redshift_range[0])
                    & (redshift_array <= redshift_range[1])
                )[0]
                if lines_use.size == 0:
                    skipped_papers.append(paper)
                    continue
            else:
                lines_use = np.arange(len(redshifts))
            for ind, redshift in enumerate(np.asarray(redshifts)[lines_use]):
                paper_ks.extend(k_vals[ind])
                # Interleave lower/upper bin edges to draw stairstep limits.
                k_edges = np.stack(
                    (np.asarray(k_lower[ind]), np.asarray(k_upper[ind]))
                ).T.flatten()
                delta_edges = np.stack(
                    (np.asarray(delta_squared[ind]), np.asarray(delta_squared[ind]))
                ).T.flatten()
                if paper["plot_as_point"]:
                    line = plt.scatter(
                        k_vals[ind],
                        delta_squared[ind],
                        marker=paper["marker"],
                        c=np.zeros(len(k_vals[ind])) + redshift,
                        cmap=colormap,
                        norm=norm,
                        edgecolors="black",
                        label=label,
                        s=150,
                        zorder=10,
                    )
                else:
                    color_val = scalar_map.to_rgba(redshift)
                    # make black outline by plotting thicker black line first
                    plt.plot(
                        k_edges,
                        delta_edges,
                        c="black",
                        linewidth=paper["linewidth"] + 2,
                        zorder=2,
                    )
                    (line,) = plt.plot(
                        k_edges,
                        delta_edges,
                        c=color_val,
                        linewidth=paper["linewidth"],
                        label=label,
                        zorder=2,
                    )
                if shade_limits is not False:
                    if shade_limits == "generational":
                        if paper["generation1"]:
                            color_use = "grey"
                            zorder = 1
                            alpha = 1
                        else:
                            color_use = "lightgrey"
                            zorder = 0
                            alpha = 1
                    else:
                        color_use = "grey"
                        zorder = 0
                        alpha = 0.5
                    plt.fill_between(
                        k_edges,
                        delta_edges,
                        delta_squared_range[1],
                        color=color_use,
                        alpha=alpha,
                        zorder=zorder,
                    )
                # Only the first redshift line of a paper enters the legend.
                if ind == 0:
                    lines.append(line)
                    legend_names.append(label)
    if len(skipped_papers) == len(paper_list):
        raise ValueError("No papers in specified redshift and/or delta squared range.")
    # --- draw theory lines and their below-line shading ---
    theory_line_inds = []
    if include_theory:
        # we want to supress legend labels for theories with linewidth=0
        # which are only used for shading
        # fix ordering to put them at the end
        linewidths = np.asarray([paper["linewidth"] for paper in theory_paper_list])
        ordering = np.argsort(linewidths == 0)
        theory_paper_list = [theory_paper_list[p] for p in ordering]
        for paper in theory_paper_list:
            label_start = " $\\bf{Theory:} \\rm{ "
            label_end = "}$"
            label = (
                label_start
                + r"\ ".join(paper["model"].split(" "))
                + r"\ ("
                + r"\ ".join(paper["author"].split(" "))
                + r",\ "
                + str(paper["year"])
                + ")"
                + label_end
            )
            k_vals = paper["k"]
            delta_squared = paper["delta_squared"]
            (line,) = plt.plot(
                k_vals,
                delta_squared,
                c="lightsteelblue",
                linewidth=paper["linewidth"],
                linestyle=paper["linestyle"],
                zorder=2,
            )
            if shade_theory is not False:
                if shade_theory == "flat":
                    color_use = "aliceblue"
                    zorder = 0
                    alpha = 1
                else:
                    color_use = "lightsteelblue"
                    zorder = 0
                    alpha = 1.0 / len(theory_paper_list)
                plt.fill_between(
                    k_vals,
                    delta_squared,
                    delta_squared_range[0],
                    color=color_use,
                    alpha=alpha,
                    zorder=zorder,
                )
            # Remember which legend handles are theory lines so they keep
            # their color when the others are greyed out below.
            theory_line_inds.append(len(lines))
            lines.append(line)
            if paper["linewidth"] > 0 and theory_legend:
                legend_names.append(label)
    # --- axes, colorbar, legend layout and output ---
    point_size = 1 / 72.0 # typography standard (points/inch)
    font_inch = fontsize * point_size
    plt.rcParams.update({"font.size": fontsize})
    plt.xlabel("k ($h Mpc^{-1}$)", fontsize=fontsize)
    plt.ylabel("$\Delta^2$ ($mK^2$)", fontsize=fontsize) # noqa
    plt.yscale("log")
    plt.xscale("log")
    plt.ylim(*delta_squared_range)
    if k_range is None:
        # Round the data-driven k range outward to "nice" decade fractions.
        k_range = [np.min(paper_ks), np.max(paper_ks)]
        min_factor = 10 ** np.ceil(np.log10(k_range[0]) * -1)
        max_factor = 10 ** np.ceil(np.log10(k_range[1]) * -1)
        k_range = [
            np.floor(k_range[0] * min_factor) / min_factor,
            np.ceil(k_range[1] * max_factor) / max_factor,
        ]
    plt.xlim(*k_range)
    plt.tick_params(labelsize=fontsize)
    cb = plt.colorbar(scalar_map, fraction=0.1, pad=0.08, label="Redshift")
    cb.ax.yaxis.set_label_position("left")
    cb.ax.yaxis.set_ticks_position("left")
    cb.set_label(label="Redshift", fontsize=fontsize)
    plt.grid(axis="y")
    if fontsize > 20:
        leg_columns = 2
    else:
        leg_columns = 3
    # Reserve vertical figure space below the axes for the legend.
    leg_rows = int(np.ceil(len(legend_names) / leg_columns))
    legend_height = (2 * leg_rows) * font_inch
    legend_height_norm = legend_height / fig_height # 0.25
    axis_height = 3 * fontsize * point_size
    axis_height_norm = axis_height / fig_height
    plot_bottom = legend_height_norm + axis_height_norm
    leg = plt.legend(
        lines,
        legend_names,
        bbox_to_anchor=(0.45, legend_height_norm / 2.0),
        loc="center",
        bbox_transform=fig.transFigure,
        ncol=leg_columns,
        frameon=False,
    )
    # Grey out non-theory legend handles (their plot colors encode redshift,
    # which would be misleading in the legend).
    for ind in range(len(leg.legendHandles)):
        if ind not in theory_line_inds:
            leg.legendHandles[ind].set_color("gray")
    plt.subplots_adjust(bottom=plot_bottom)
    fig.tight_layout()
    plt.savefig(plot_filename)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
"--papers",
type=str,
nargs="+",
default=None,
help="Papers to include on plot "
"(must be in data directory). Defaults to all papers "
"in the data directory.",
)
parser.add_argument(
"--no_theory",
action="store_true",
help="Flag to not plot theory lines. If True, default range is modified.",
)
parser.add_argument(
"--theories",
type=str,
nargs="+",
default=None,
help="Theories to plot. Theory-specific options can be set to control which "
"lines are drawn.",
)
parser.add_argument(
"--theory_model",
nargs="+",
type=str,
default=None,
help="Model type to select from theories (e.g. 'bright' or 'faint' for "
"Mesinger et al. 2016).",
)
parser.add_argument(
"--theory_nf",
nargs="+",
type=str,
default=None,
help="Neutral fractions to select from theories.",
)
parser.add_argument(
"--theory_redshift",
nargs="+",
type=str,
default=None,
help="Redshifts to select from theories.",
)
parser.add_argument(
"--theory_linewidth",
nargs="+",
type=float,
default=None,
help="Linewidths for theory lines.",
)
parser.add_argument(
"--file",
type=str,
dest="filename",
help="Filename to save plot to.",
default="eor_limits.pdf",
)
parser.add_argument(
"--aspoints",
type=str,
nargs="+",
default=["patil_2017", "mertens_2020"],
help="Papers to plot as points rather than lines.",
)
parser.add_argument(
"--range",
type=float,
help="Range of Delta Squared to include on plot (yaxis range). "
"Defaults to [1e3, 1e6] if include_theory is false and [1e0, 1e6] otherwise",
default=None,
nargs="+",
)
parser.add_argument(
"--redshift",
type=float,
help="Range of redshifts to include on plot.",
default=None,
nargs="+",
)
parser.add_argument(
"--k_range",
type=float,
help="Range of k values to include on plot (xaxis range).",
default=None,
nargs="+",
)
parser.add_argument(
"--shade_limits",
type=str,
default="generational",
help="Type of shading above limits to apply, one of: 'generational', 'alpha' "
"or False.",
)
parser.add_argument(
"--shade_theory",
type=str,
default="flat",
help="Type of shading below theories to apply, one of: 'flat', 'alpha' "
"or False.",
)
parser.add_argument(
"--colormap", type=str, help="Matplotlib colormap to use.", default="Spectral_r"
)
parser.add_argument(
"--bold",
type=str,
nargs="+",
help="List of papers to bold in caption.",
default=None,
)
parser.add_argument("--fontsize", type=int, help="Font size to use.", default=15)
args = parser.parse_args()
if args.shade_limits == "False":
args.shade_limits = False
if args.shade_theory == "False":
args.shade_theory = False
if args.theories is not None:
if args.theory_nf is None:
args.theory_nf = [None]
else:
args.theory_nf = [
float(val) if val != "None" else None for val in args.theory_nf
]
if args.theory_redshift is None:
args.theory_redshift = [None]
if args.theory_model is None:
args.theory_model = [None]
theory_params = {}
num_theories = len(args.theories)
num_models = len(args.theory_model)
num_nf = len(args.theory_nf)
num_redshift = len(args.theory_redshift)
num_theory_lines = max([num_theories, num_models, num_nf, num_redshift])
if num_theory_lines > 1:
if num_theories == 1:
args.theories = args.theories * num_theory_lines
elif num_theories != num_theory_lines:
raise ValueError(
"Number of theories must be one or match the max length of "
"theory_model, theory_nf or theory_redshift."
)
if num_models == 1:
args.theory_model = args.theory_model * num_theory_lines
elif num_models != num_theory_lines:
raise ValueError(
"Number of theory_models must be one or match the max length of "
"theories, theory_nf or theory_redshift."
)
if num_nf == 1:
args.theory_nf = args.theory_nf * num_theory_lines
elif num_nf != num_theory_lines:
raise ValueError(
"Number of theory_nfs must be one or match the max length of "
"theories, theory_model or theory_redshift."
)
if num_redshift == 1:
args.theory_redshift = args.theory_redshift * num_theory_lines
elif num_redshift != num_theory_lines:
raise ValueError(
"Number of theory_redshifts must be one or match the max length of "
"theories, theory_model or theory_nf."
)
if args.theory_linewidth is not None:
if len(args.theory_linewidth) == 1:
args.theory_linewidth = args.theory_linewidth * num_theory_lines
elif len(args.theory_linewidth) != num_theory_lines:
raise ValueError(
"Number of theory lines must be one or match the max length of "
"theories, theory_model, theory_nf or theory_redshift."
)
for index, theory in enumerate(args.theories):
name = (
theory
+ "_"
+ str(args.theory_model[index])
+ "_nf_"
+ str(args.theory_nf[index])
+ "_z_"
+ str(args.theory_redshift[index])
)
theory_params[name] = {
"paper": theory,
"model": args.theory_model[index],
"nf": args.theory_nf[index],
"redshift": args.theory_redshift[index],
}
if args.theory_linewidth is not None:
theory_params[name]["linewidth"] = args.theory_linewidth[index]
else:
theory_params = default_theory_params
make_plot(
papers=args.papers,
include_theory=not args.no_theory,
theory_params=theory_params,
plot_as_points=args.aspoints,
delta_squared_range=args.range,
redshift_range=args.redshift,
k_range=args.k_range,
shade_limits=args.shade_limits,
shade_theory=args.shade_theory,
colormap=args.colormap,
plot_filename=args.filename,
bold_papers=args.bold,
fontsize=args.fontsize,
)
| true | true |
1c3ce37f6dbaffac25085825bc876854448b7f04 | 1,483 | py | Python | tests/setup_tests.py | trungngv/CHAID | 794756560872e944cec6a6dcc780feeeeadc51ed | [
"Apache-2.0"
] | 141 | 2016-06-14T13:38:38.000Z | 2022-02-03T12:01:18.000Z | tests/setup_tests.py | trungngv/CHAID | 794756560872e944cec6a6dcc780feeeeadc51ed | [
"Apache-2.0"
] | 110 | 2016-06-16T14:30:34.000Z | 2022-01-28T19:36:10.000Z | tests/setup_tests.py | trungngv/CHAID | 794756560872e944cec6a6dcc780feeeeadc51ed | [
"Apache-2.0"
] | 47 | 2016-11-27T16:21:43.000Z | 2021-12-28T08:40:51.000Z | """
This module provides helper functions for the rest of the testing module
"""
from collections import Iterable
import os
import sys
from math import isnan
import numpy as np
ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
sys.path = [ROOT_FOLDER] + sys.path
np.seterr(divide='ignore', invalid='ignore')
import CHAID
def islist(a):
    """Return True when *a* is an iterable container other than a string."""
    if isinstance(a, str):
        return False
    return isinstance(a, Iterable)
def str_ndlist(a):
    """Recursively stringify *a*: iterables become lists, leaves become str."""
    if isinstance(a, Iterable) and not isinstance(a, str):
        return [str_ndlist(item) for item in a]
    return str(a)
def list_unordered_equal(list_a, list_b):
    """Compare the contents of two nd lists, ignoring element order.

    Elements are stringified (via str_ndlist) before sorting so mixed-type
    lists can be ordered without a TypeError. Two NaN leaves compare equal.
    """
    if islist(list_a) and islist(list_b):
        # Stringify so heterogeneous elements sort deterministically.
        list_a = [str_ndlist(item_a) for item_a in list_a]
        list_b = [str_ndlist(item_b) for item_b in list_b]
        list_a.sort()
        list_b.sort()
        return len(list_a) == len(list_b) and all(
            list_unordered_equal(*item) for item in zip(list_a, list_b)
        )
    # Leaf comparison: equal values, or two NaNs (NaN != NaN by default).
    # Fixed: the original tested `isinstance(float, str)`, which is always
    # False, so the NaN-equality branch was unreachable dead code.
    if list_a == list_b:
        return True
    return (
        isinstance(list_a, float)
        and isinstance(list_b, float)
        and isnan(list_a)
        and isnan(list_b)
    )
def list_ordered_equal(list_a, list_b):
    """Compare the contents of two nd lists, preserving element order.

    Two NaN leaves compare equal. (Docstring fixed: this is the ORDERED
    comparison, unlike list_unordered_equal.)
    """
    if islist(list_a) and islist(list_b):
        list_a = list(list_a)
        list_b = list(list_b)
        return len(list_a) == len(list_b) and all(
            list_ordered_equal(*item) for item in zip(list_a, list_b)
        )
    if list_a == list_b:
        return True
    # Fixed: guard isnan() with float checks — the original called
    # isnan(list_a) unconditionally and raised TypeError for unequal
    # non-float leaves (e.g. strings).
    return (
        isinstance(list_a, float)
        and isinstance(list_b, float)
        and isnan(list_a)
        and isnan(list_b)
    )
| 32.23913 | 110 | 0.691167 |
from collections import Iterable
import os
import sys
from math import isnan
import numpy as np
ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
sys.path = [ROOT_FOLDER] + sys.path
np.seterr(divide='ignore', invalid='ignore')
import CHAID
def islist(a):
    """Return True when *a* is an iterable container other than a string."""
    return isinstance(a, Iterable) and not isinstance(a, str)
def str_ndlist(a):
    """Recursively stringify *a*: iterables become lists, leaves become str."""
    return [str_ndlist(i) for i in a] if islist(a) else str(a)
def list_unordered_equal(list_a, list_b):
    """Compare the contents of two nd lists, ignoring element order."""
    if islist(list_a) and islist(list_b):
        # Stringify elements so heterogeneous lists sort without TypeError.
        list_a = [str_ndlist(item_a) for item_a in list_a]
        list_b = [str_ndlist(item_b) for item_b in list_b]
        list_a.sort()
        list_b.sort()
        return len(list_a) == len(list_b) and all(list_unordered_equal(*item) for item in zip(list_a, list_b))
    else:
        # NOTE(review): `isinstance(float, str)` is always False, so the
        # NaN-equality term is dead code — likely meant
        # `isinstance(list_a, float) and isinstance(list_b, float)`.
        return list_a == list_b or (isinstance(float, str) and isnan(list_a) and isnan(list_b))
def list_ordered_equal(list_a, list_b):
    """Compare the contents of two nd lists, preserving element order."""
    if islist(list_a) and islist(list_b):
        list_a = [item_a for item_a in list_a]
        list_b = [item_b for item_b in list_b]
        return len(list_a) == len(list_b) and all(list_ordered_equal(*item) for item in zip(list_a, list_b))
    else:
        # NOTE(review): isnan() raises TypeError for unequal non-float
        # leaves (e.g. strings) — should be guarded with float checks.
        return list_a == list_b or (isnan(list_a) and isnan(list_b))
| true | true |
1c3ce3c941daffcd8c87691cb40d3903a4a8bf21 | 10,205 | py | Python | Stock/StockAdd.py | LaDane/Gamehelper | 55357046471ca8eb560a787b52fd5cbf450d6697 | [
"MIT"
] | null | null | null | Stock/StockAdd.py | LaDane/Gamehelper | 55357046471ca8eb560a787b52fd5cbf450d6697 | [
"MIT"
] | null | null | null | Stock/StockAdd.py | LaDane/Gamehelper | 55357046471ca8eb560a787b52fd5cbf450d6697 | [
"MIT"
] | null | null | null | import discord
import json
import asyncio
from discord.ext import commands
from filehandler import FileHandler
from jsonhandler import JsonHandler
jh = JsonHandler()
fh = FileHandler()
class StockAdd(commands.Cog):
    """Discord cog: interactive 'stockshop' flow for adding items to a shop.

    State is read from / written to the json files behind ``FileHandler``
    ('worlditems', 'shops'); display strings come from ``JsonHandler``.
    """
    def __init__(self, bot):
        self.bot = bot
        self.load_data()
    def load_data(self):
        # Refresh in-memory copies of the json data files.
        self.worlditems = fh.load_file('worlditems')
        # self.currency = fh.load_file('currency')
        self.shops = fh.load_file('shops')
    def s_s_t(self):
        # Formatted list of registered shop titles (S-IDs).
        return jh.show_shop_titles()
    def s_s_sid(self):
        # Space-separated string of all existing shop-entry ids (SE-IDs).
        return jh.show_shop_stockid2()
    def s_wi_t(self):
        # Formatted list of registered world item titles (W-IDs).
        return jh.show_worlditem_titles()
    # Used to generate a new unique number from a list
    def Convert(self, string):
        # Split a space-separated string into a list of tokens.
        li = list(string.split(" "))
        return li
    # Add items as stock to a shop
    @commands.Cog.listener()
    async def on_message(self, message):
        # Interactive multi-step prompt: shop -> item -> quantity, then
        # posts a stock embed in the shop's buy channel and persists it.
        if message.channel.id == 699194951535427635: # Channel id of "shop-editor"
            if message.content.startswith('stockshop'):
                channel = message.channel
                await channel.purge(limit=10)
                self.load_data()
                try:
                    await channel.send(self.s_s_t())
                    msg1 = await channel.send("-\nAbove is a list of registered [S-ID] \nType the **Shop-ID** [S-ID] that you would like to add items to")
                    await msg1.add_reaction(emoji='\U0001F6D1') # Add cancel reaction to message
                    await asyncio.sleep(1)
                    # NOTE(review): the lambda's `message` parameter shadows the outer
                    # message, so `message.author == message.author` is always True —
                    # any user's message in this channel is accepted. TODO confirm
                    # intent was to compare against the original author.
                    msg = await self.bot.wait_for('message', check=lambda message: message.author == message.author and message.channel == channel)
                    shopid = msg.content
                    if shopid == "cancel": # Takes use of CancelMenu cog
                        await channel.purge(limit=10)
                        await channel.send("Command canceled!")
                        return
                    if not shopid in self.shops:
                        await channel.purge(limit=10)
                        await channel.send("You have entered a S-ID that's not registered. Make sure that the entered text is an **exact** match to a Shop-ID\nCanceling request...")
                    if shopid in self.shops:
                        await channel.purge(limit=10)
                        await channel.send(f"You have chosen to stock the shelves of **{shopid}**")
                        await asyncio.sleep(3)
                        await channel.purge(limit=10)
                        # Generate new unique number from list CODE
                        chair_inv_numbers = self.s_s_sid()
                        if len(chair_inv_numbers) == 0:
                            new_number = 0
                        if len(chair_inv_numbers) != 0:
                            chair_inv_numbers = chair_inv_numbers.strip(' ')
                            convert_chair_inv_numbers = self.Convert(chair_inv_numbers)
                            # NOTE(review): the next three calls discard their
                            # results — dead code; only the final max() matters.
                            sorted(convert_chair_inv_numbers)
                            sorted(map(int,convert_chair_inv_numbers))
                            max(convert_chair_inv_numbers)
                            new_number = max(map(int,convert_chair_inv_numbers))
                        # New SE-ID = one more than the highest existing id.
                        unique_new_number = int(new_number) + 1
                        # Generate code above REMEMBER def Convert at top!!!
                        shopentryid = unique_new_number
                        try:
                            await channel.send(self.s_wi_t())
                        except:
                            await channel.send("*No [W-ID] registered yet*")
                        msg3 = await channel.send(f"-\nAbove is a list of registered [W-ID] (numbers in bold)\nWhich **W-ID** would you like to add to the shop?")
                        await msg3.add_reaction(emoji='\U0001F6D1') # Add cancel reaction to message
                        await asyncio.sleep(1)
                        msg = await self.bot.wait_for('message', check=lambda message: message.author == message.author and message.channel == channel)
                        worldid = msg.content
                        if worldid == "cancel": # Takes use of CancelMenu cog
                            await channel.purge(limit=10)
                            await channel.send("Command canceled!")
                            return
                        if not worldid in self.worlditems:
                            await channel.purge(limit=10)
                            await channel.send("You have entered a W-ID that's not registered. Make sure that the entered text is an **exact** match to a world ID\nCanceling request...")
                        if worldid in self.worlditems:
                            await channel.purge(limit=10)
                            await channel.send(f"You have chosen to add **{self.worlditems[worldid]['ItemName']}** to **{shopid}**")
                            # Preview embed of the chosen item.
                            embed = discord.Embed(title=f"**{self.worlditems[worldid]['ItemName']}**", description=f"*{self.worlditems[worldid]['Description']}*", color=discord.Color.red())
                            embed.set_image(url=f"{self.worlditems[worldid]['Picture']}")
                            embed.set_footer(text=f"W-ID [{worldid}]")
                            embed.add_field(name="Stats", value=f"{self.worlditems[worldid]['StatsModifier']} {self.worlditems[worldid]['Stats']}", inline=False)
                            embed.add_field(name="Type", value=f"{self.worlditems[worldid]['Type']}", inline=True)
                            embed.add_field(name="Weight", value=f"{self.worlditems[worldid]['Weight']} slots")
                            embed.add_field(name="Value", value=f"{self.worlditems[worldid]['Value']}")
                            await channel.send(embed=embed)
                            msg4 = await channel.send(f"How many of '{self.worlditems[worldid]['ItemName']}' would you like to stock in {shopid}")
                            await msg4.add_reaction(emoji='\U0001F6D1') # Add cancel reaction to message
                            await asyncio.sleep(1)
                            msg = await self.bot.wait_for('message', check=lambda message: message.author == message.author and message.channel == channel)
                            quantity = msg.content
                            if quantity == "cancel": # Takes use of CancelMenu cog
                                await channel.purge(limit=10)
                                await channel.send("Command canceled!")
                                return
                            await channel.purge(limit=10)
                            # Post the stock embed in the shop's buy channel and
                            # persist the new entry to shops.json.
                            buy_channel_id = self.shops[shopid]["ShopBuyID"]
                            buy_channel = self.bot.get_channel(buy_channel_id)
                            try:
                                stock_embed = discord.Embed(title=f"**{self.worlditems[worldid]['ItemName']}**", description=f"*{self.worlditems[worldid]['Description']}*", color=discord.Color.red())
                                stock_embed.set_image(url=f"{self.worlditems[worldid]['Picture']}")
                                stock_embed.set_footer(text=f"W-ID [{worldid}]\nSE-ID [{shopentryid}]")
                                stock_embed.add_field(name="Stats", value=f"{self.worlditems[worldid]['StatsModifier']} {self.worlditems[worldid]['Stats']}", inline=False)
                                stock_embed.add_field(name="Type", value=f"{self.worlditems[worldid]['Type']}", inline=True)
                                stock_embed.add_field(name="Weight", value=f"{self.worlditems[worldid]['Weight']} slots")
                                stock_embed.add_field(name="Value", value=f"{self.worlditems[worldid]['Value']}")
                                stock_embed.add_field(name="Amount in stock", value=f"{quantity}")
                                shop_stock_msg = await buy_channel.send(embed=stock_embed)
                                shop_stock_msg_id = shop_stock_msg.id
                                await shop_stock_msg.add_reaction(emoji='\U0001F4B0') # Add moneybag reaction to message
                                self.shops[shopid]["Stock"][shopentryid] = {}
                                self.shops[shopid]["Stock"][shopentryid]["WorldID"] = worldid
                                self.shops[shopid]["Stock"][shopentryid]["Quantity"] = quantity
                                self.shops[shopid]["Stock"][shopentryid]["BuyStockMsgID"] = shop_stock_msg_id
                                fh.save_file(self.shops, 'shops')
                                await asyncio.sleep(1)
                                await channel.send(f"Shop entry **{self.worlditems[worldid]['ItemName']}** has succesfully been added to **{shopid}**!")
                            except:
                                await channel.send("No buy channel exists for this shop, please set up shop properly\nEntry failed...")
                except:
                    await channel.send("*No [S-ID] registered yet*\n**Please set up a shop before adding stock!**")
def setup(bot):
bot.add_cog(StockAdd(bot))
# =======================
# JUNK
#========================
# MeM data handler
# def format_shop(self):
# print_str = ""
# for title, value in self.shops.items():
# print_str += f"{title} {value['ShopOwner']}\n"
# return print_str
# Written in code below
# await channel.send(self.format_shop())
# HELP HERE!
# def show_shop_stockid(self): # This function is for when you would like to display the titles in shops.json
# print_str = ""
# for title, value in self.shops.items():
# print_str += f"{title} {value['Stock']['ShopEntryID']}\n"
# show_shop_stockid_title = title # since title is not used in this command, this sets it to nothing and we have no problems in code
# return print_str
| 51.025 | 199 | 0.524351 | import discord
import json
import asyncio
from discord.ext import commands
from filehandler import FileHandler
from jsonhandler import JsonHandler
jh = JsonHandler()
fh = FileHandler()
class StockAdd(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.load_data()
def load_data(self):
self.worlditems = fh.load_file('worlditems')
self.shops = fh.load_file('shops')
def s_s_t(self):
return jh.show_shop_titles()
def s_s_sid(self):
return jh.show_shop_stockid2()
def s_wi_t(self):
return jh.show_worlditem_titles()
def Convert(self, string):
li = list(string.split(" "))
return li
@commands.Cog.listener()
async def on_message(self, message):
if message.channel.id == 699194951535427635:
if message.content.startswith('stockshop'):
channel = message.channel
await channel.purge(limit=10)
self.load_data()
try:
await channel.send(self.s_s_t())
msg1 = await channel.send("-\nAbove is a list of registered [S-ID] \nType the **Shop-ID** [S-ID] that you would like to add items to")
await msg1.add_reaction(emoji='\U0001F6D1')
await asyncio.sleep(1)
msg = await self.bot.wait_for('message', check=lambda message: message.author == message.author and message.channel == channel)
shopid = msg.content
if shopid == "cancel":
await channel.purge(limit=10)
await channel.send("Command canceled!")
return
if not shopid in self.shops:
await channel.purge(limit=10)
await channel.send("You have entered a S-ID that's not registered. Make sure that the entered text is an **exact** match to a Shop-ID\nCanceling request...")
if shopid in self.shops:
await channel.purge(limit=10)
await channel.send(f"You have chosen to stock the shelves of **{shopid}**")
await asyncio.sleep(3)
await channel.purge(limit=10)
# Generate new unique number from list CODE
chair_inv_numbers = self.s_s_sid()
if len(chair_inv_numbers) == 0:
new_number = 0
if len(chair_inv_numbers) != 0:
chair_inv_numbers = chair_inv_numbers.strip(' ')
convert_chair_inv_numbers = self.Convert(chair_inv_numbers)
sorted(convert_chair_inv_numbers)
sorted(map(int,convert_chair_inv_numbers))
max(convert_chair_inv_numbers)
new_number = max(map(int,convert_chair_inv_numbers))
unique_new_number = int(new_number) + 1
# Generate code above REMEMBER def Convert at top!!!
shopentryid = unique_new_number
try:
await channel.send(self.s_wi_t())
except:
await channel.send("*No [W-ID] registered yet*")
msg3 = await channel.send(f"-\nAbove is a list of registered [W-ID] (numbers in bold)\nWhich **W-ID** would you like to add to the shop?")
await msg3.add_reaction(emoji='\U0001F6D1') # Add cancel reaction to message
await asyncio.sleep(1)
msg = await self.bot.wait_for('message', check=lambda message: message.author == message.author and message.channel == channel)
worldid = msg.content
if worldid == "cancel": # Takes use of CancelMenu cog
await channel.purge(limit=10)
await channel.send("Command canceled!")
return
if not worldid in self.worlditems:
await channel.purge(limit=10)
await channel.send("You have entered a W-ID that's not registered. Make sure that the entered text is an **exact** match to a world ID\nCanceling request...")
if worldid in self.worlditems:
await channel.purge(limit=10)
await channel.send(f"You have chosen to add **{self.worlditems[worldid]['ItemName']}** to **{shopid}**")
embed = discord.Embed(title=f"**{self.worlditems[worldid]['ItemName']}**", description=f"*{self.worlditems[worldid]['Description']}*", color=discord.Color.red())
embed.set_image(url=f"{self.worlditems[worldid]['Picture']}")
embed.set_footer(text=f"W-ID [{worldid}]")
embed.add_field(name="Stats", value=f"{self.worlditems[worldid]['StatsModifier']} {self.worlditems[worldid]['Stats']}", inline=False)
embed.add_field(name="Type", value=f"{self.worlditems[worldid]['Type']}", inline=True)
embed.add_field(name="Weight", value=f"{self.worlditems[worldid]['Weight']} slots")
embed.add_field(name="Value", value=f"{self.worlditems[worldid]['Value']}")
await channel.send(embed=embed)
msg4 = await channel.send(f"How many of '{self.worlditems[worldid]['ItemName']}' would you like to stock in {shopid}")
await msg4.add_reaction(emoji='\U0001F6D1')
await asyncio.sleep(1)
msg = await self.bot.wait_for('message', check=lambda message: message.author == message.author and message.channel == channel)
quantity = msg.content
if quantity == "cancel":
await channel.purge(limit=10)
await channel.send("Command canceled!")
return
await channel.purge(limit=10)
buy_channel_id = self.shops[shopid]["ShopBuyID"]
buy_channel = self.bot.get_channel(buy_channel_id)
try:
stock_embed = discord.Embed(title=f"**{self.worlditems[worldid]['ItemName']}**", description=f"*{self.worlditems[worldid]['Description']}*", color=discord.Color.red())
stock_embed.set_image(url=f"{self.worlditems[worldid]['Picture']}")
stock_embed.set_footer(text=f"W-ID [{worldid}]\nSE-ID [{shopentryid}]")
stock_embed.add_field(name="Stats", value=f"{self.worlditems[worldid]['StatsModifier']} {self.worlditems[worldid]['Stats']}", inline=False)
stock_embed.add_field(name="Type", value=f"{self.worlditems[worldid]['Type']}", inline=True)
stock_embed.add_field(name="Weight", value=f"{self.worlditems[worldid]['Weight']} slots")
stock_embed.add_field(name="Value", value=f"{self.worlditems[worldid]['Value']}")
stock_embed.add_field(name="Amount in stock", value=f"{quantity}")
shop_stock_msg = await buy_channel.send(embed=stock_embed)
shop_stock_msg_id = shop_stock_msg.id
await shop_stock_msg.add_reaction(emoji='\U0001F4B0')
self.shops[shopid]["Stock"][shopentryid] = {}
self.shops[shopid]["Stock"][shopentryid]["WorldID"] = worldid
self.shops[shopid]["Stock"][shopentryid]["Quantity"] = quantity
self.shops[shopid]["Stock"][shopentryid]["BuyStockMsgID"] = shop_stock_msg_id
fh.save_file(self.shops, 'shops')
await asyncio.sleep(1)
await channel.send(f"Shop entry **{self.worlditems[worldid]['ItemName']}** has succesfully been added to **{shopid}**!")
except:
await channel.send("No buy channel exists for this shop, please set up shop properly\nEntry failed...")
except:
await channel.send("*No [S-ID] registered yet*\n**Please set up a shop before adding stock!**")
def setup(bot):
bot.add_cog(StockAdd(bot))
| true | true |
1c3ce40c51217203e9708b52bba7795f1025c118 | 937 | py | Python | manual_time_write.py | n3cr0Tech/bluetooth_device_hack | 6423750f8d8070dbdd5757e369472be3f22acd05 | [
"MIT"
] | null | null | null | manual_time_write.py | n3cr0Tech/bluetooth_device_hack | 6423750f8d8070dbdd5757e369472be3f22acd05 | [
"MIT"
] | null | null | null | manual_time_write.py | n3cr0Tech/bluetooth_device_hack | 6423750f8d8070dbdd5757e369472be3f22acd05 | [
"MIT"
] | null | null | null | # NOTE: this code ONLY runs on Raspberry Pi
import pygatt
import time
adapter = pygatt.GATTToolBackend()
adapter.start()
print('PyGatt Adapter Started')
MAC_ADDR = 'ENTER YOUR BLE DEVICE MAC ADDRESS HERE'
device = adapter.connect(address=MAC_ADDR, address_type=pygatt.BLEAddressType.random)
hex_command = '21ff1006140b1d023329' # alter this value for your experimentation
adapter.sendline('char-write-cmd 0x0b ' + hex_command)
print('sending ' + hex_command + ' to 0x0b')
#adapter.sendline('char-write-cmd 0x0025 a106410a1a1a30b7e320bda291')
#adapter.sendline('char-write-cmd 0x0025 a107')
#print '-->headers sent'
#adapter.sendline('char-write-cmd 0x0025 a104030501') #Mode 3 of 5 Intensity with 1Min
#time.sleep(0.3)
#adapter.sendline('char-write-cmd 0x0025 a104070f04') #Mode 7 of 15 Intensity with 4Min
#time.sleep(0.3)
#print '-->High Intensity Triggered for 4 Min duration'
adapter.stop()
print('--> Disconnected from BLE Device') | 44.619048 | 87 | 0.773746 |
import pygatt
import time
adapter = pygatt.GATTToolBackend()
adapter.start()
print('PyGatt Adapter Started')
MAC_ADDR = 'ENTER YOUR BLE DEVICE MAC ADDRESS HERE'
device = adapter.connect(address=MAC_ADDR, address_type=pygatt.BLEAddressType.random)
hex_command = '21ff1006140b1d023329'
adapter.sendline('char-write-cmd 0x0b ' + hex_command)
print('sending ' + hex_command + ' to 0x0b')
| true | true |
1c3ce44f8305755f403894037daffd8f0fffed5b | 1,778 | py | Python | js/soundShader/mo.py | pome-ta/draftPythonistaScripts | 5e0e2c286589a8069dd8963c2653fe3d783dcd6c | [
"MIT"
] | null | null | null | js/soundShader/mo.py | pome-ta/draftPythonistaScripts | 5e0e2c286589a8069dd8963c2653fe3d783dcd6c | [
"MIT"
] | 3 | 2021-08-15T14:44:23.000Z | 2021-08-15T16:19:20.000Z | js/soundShader/mo.py | pome-ta/draftPythonistaScripts | 63fe06fa369d536fdcc3fb4216931656a515b734 | [
"MIT"
] | null | null | null | import os
import sys
from http.server import HTTPServer, SimpleHTTPRequestHandler
import pathlib
import ui
#sys.path.append(str(pathlib.Path.cwd())
import wkwebview
os.chdir(os.path.join(os.path.dirname(__file__), 'public'))
uri = pathlib.Path('./index.html')
httpd = HTTPServer(('', 8000), SimpleHTTPRequestHandler)
class MyWebViewDelegate:
def webview_should_start_load(self, webview, url, nav_type):
"""
See nav_type options at
https://developer.apple.com/documentation/webkit/wknavigationtype?language=objc
"""
print('Will start loading', url)
return True
def webview_did_start_load(self, webview):
#print('Started loading')
pass
@ui.in_background
def webview_did_finish_load(self, webview):
#str(webview.eval_js('document.title'))
print('Finished loading ' + str(webview.eval_js('document.title')))
#pass
class View(ui.View):
def __init__(self, *args, **kwargs):
ui.View.__init__(self, *args, **kwargs)
#self.wv = wkwebview.WKWebView(delegate=MyWebViewDelegate())
self.wv = wkwebview.WKWebView()
#self.present(style='fullscreen', orientations=['portrait'])
#self.wv.load_url(str(uri), True)
#self.wv.load_url('http://localhost:8000/')
self.wv.flex = 'WH'
self.add_subview(self.wv)
'''
def layout(self):
self.wv.width = self.width
self.wv.height = self.height
#self.wv.flex = 'WH'
'''
def will_close(self):
self.wv.clear_cache()
httpd.shutdown()
if __name__ == '__main__':
view = View()
view.present(style='panel', orientations=['portrait'])
#view.wv.clear_cache()
try:
view.wv.load_url('http://localhost:8000/')
httpd.serve_forever()
except KeyboardInterrupt:
httpd.shutdown()
print('Server stopped')
| 23.394737 | 83 | 0.683915 | import os
import sys
from http.server import HTTPServer, SimpleHTTPRequestHandler
import pathlib
import ui
import wkwebview
os.chdir(os.path.join(os.path.dirname(__file__), 'public'))
uri = pathlib.Path('./index.html')
httpd = HTTPServer(('', 8000), SimpleHTTPRequestHandler)
class MyWebViewDelegate:
def webview_should_start_load(self, webview, url, nav_type):
print('Will start loading', url)
return True
def webview_did_start_load(self, webview):
pass
@ui.in_background
def webview_did_finish_load(self, webview):
print('Finished loading ' + str(webview.eval_js('document.title')))
class View(ui.View):
def __init__(self, *args, **kwargs):
ui.View.__init__(self, *args, **kwargs)
self.wv = wkwebview.WKWebView()
self.wv.flex = 'WH'
self.add_subview(self.wv)
def will_close(self):
self.wv.clear_cache()
httpd.shutdown()
if __name__ == '__main__':
view = View()
view.present(style='panel', orientations=['portrait'])
try:
view.wv.load_url('http://localhost:8000/')
httpd.serve_forever()
except KeyboardInterrupt:
httpd.shutdown()
print('Server stopped')
| true | true |
1c3ce4dd90830cfd75fd2868a6010a2341a7eed9 | 5,488 | py | Python | src/hg/makeDb/genbank/src/lib/py/genbank/Pipeline.py | andypohl/kent | af7a004c8f3fa909cd8c2cfc2e5bea60e3421cd1 | [
"MIT"
] | 171 | 2015-04-22T15:16:02.000Z | 2022-03-18T20:21:53.000Z | src/hg/makeDb/genbank/src/lib/py/genbank/Pipeline.py | andypohl/kent | af7a004c8f3fa909cd8c2cfc2e5bea60e3421cd1 | [
"MIT"
] | 60 | 2016-10-03T15:15:06.000Z | 2022-03-30T15:21:52.000Z | src/hg/makeDb/genbank/src/lib/py/genbank/Pipeline.py | andypohl/kent | af7a004c8f3fa909cd8c2cfc2e5bea60e3421cd1 | [
"MIT"
] | 80 | 2015-04-16T10:39:48.000Z | 2022-03-29T16:36:30.000Z | "File-like object to create and manage a pipeline of subprocesses"
import subprocess
def hasWhiteSpace(word):
"check if a string contains any whitespace"
for c in word:
if c.isspace():
return True
return False
class Proc(subprocess.Popen):
"""A process in the pipeline. This extends subprocess.Popen(),
it also has the following members:
cmd - command argument vector
"""
def __init__(self, cmd, stdin, stdout):
self.cmd = list(cmd) # clone list
# need close_fds, or write pipe line fails due to pipes being
# incorrectly left open (FIXME: report bug)
subprocess.Popen.__init__(self, self.cmd, stdin=stdin, stdout=stdout, close_fds=True)
def getDesc(self):
"""get command as a string to use as a description of the process.
Single quote white-space containing arguments."""
strs = []
for w in self.cmd:
if hasWhiteSpace(w):
strs.append("'" + w + "'")
else:
strs.append(w)
return " ".join(strs)
class Pipeline(object):
"""File-like object to create and manage a pipeline of subprocesses.
procs - an ordered list of Proc objects that compose the pipeine"""
def __init__(self, cmds, mode='r', otherEnd=None):
"""cmds is either a list of arguments for a single process, or
a list of such lists for a pipeline. Mode is 'r' for a pipeline
who's output will be read, or 'w' for a pipeline to that is to
have data written to it. If otherEnd is specified, and is a string,
it is a file to open as stdio file at the other end of the pipeline.
If it's not a string, it is assumed to be a file object to use for output.
read pipeline ('r'):
otherEnd --> cmd[0] --> ... --> cmd[n] --> fh
write pipeline ('w')
fh --> cmd[0] --> ... --> cmd[n] --> otherEnd
The field fh is the file object used to access the pipeline.
"""
if (mode == "r") and (mode == "w"):
raise IOError('invalid mode "' + mode + '"')
self.mode = mode
self.procs = []
self.isRunning = True
self.failExcept = None
if isinstance(cmds[0], str):
cmds = [cmds] # one-process pipeline
(firstIn, lastOut, otherFh) = self._setupEnds(otherEnd)
for cmd in cmds:
self._createProc(cmd, cmds, firstIn, lastOut)
# finish up
if otherFh != None:
otherFh.close()
if mode == "r":
self.fh = self.procs[len(self.procs)-1].stdout
else:
self.fh = self.procs[0].stdin
def _setupEnds(self, otherEnd):
"set files at ends of a pipeline"
# setup other end of pipeline
if otherEnd != None:
if isinstance(otherEnd, str):
otherFh = file(otherEnd, self.mode)
else:
otherFh = otherEnd
if self.mode == "r":
firstIn = otherFh
else:
lastOut = otherFh
else:
otherFh = None
if self.mode == "r":
firstIn = 0
else:
lastOut = 1
# setup this end of pipe
if self.mode == "r":
lastOut = subprocess.PIPE
else:
firstIn = subprocess.PIPE
return (firstIn, lastOut, otherFh)
def _createProc(self, cmd, cmds, firstIn, lastOut):
"""create one process"""
if (cmd == cmds[0]):
stdin = firstIn # first process in pipeline
else:
stdin = self.procs[len(self.procs)-1].stdout
if (cmd == cmds[len(cmds)-1]):
stdout = lastOut # last process in pipeline
else:
stdout = subprocess.PIPE
p = Proc(cmd, stdin=stdin, stdout=stdout)
self.procs.append(p)
def getDesc(self):
"""get the pipeline commands as a string to use as a description"""
strs = []
for p in self.procs:
strs.append(p.getDesc())
return " | ".join(strs)
def wait(self, noError=False):
"""wait to for processes to complete, generate an exception if one exits
no-zero, unless noError is True, in which care return the exit code of the
first process that failed"""
self.isRunning = False
# must close before waits for output pipeline
if self.mode == 'w':
self.fh.close()
# wait on processes
firstFail = None
for p in self.procs:
if p.wait() != 0:
if firstFail == None:
firstFail = p
# must close after waits for input pipeline
if self.mode == 'r':
self.fh.close()
# handle failures
if firstFail != None:
self.failExcept = OSError(("process exited with %d: \"%s\" in pipeline \"%s\""
% (firstFail.returncode, firstFail.getDesc(), self.getDesc())))
if not noError:
raise self.failExcept
else:
return firstFail.returncode
else:
return 0
def close(self):
"wait for process to complete, with an error if it exited non-zero"
if self.isRunning:
self.wait()
if self.failExcept != None:
raise failExcept
| 32.666667 | 102 | 0.543003 |
import subprocess
def hasWhiteSpace(word):
for c in word:
if c.isspace():
return True
return False
class Proc(subprocess.Popen):
def __init__(self, cmd, stdin, stdout):
self.cmd = list(cmd)
subprocess.Popen.__init__(self, self.cmd, stdin=stdin, stdout=stdout, close_fds=True)
def getDesc(self):
strs = []
for w in self.cmd:
if hasWhiteSpace(w):
strs.append("'" + w + "'")
else:
strs.append(w)
return " ".join(strs)
class Pipeline(object):
def __init__(self, cmds, mode='r', otherEnd=None):
if (mode == "r") and (mode == "w"):
raise IOError('invalid mode "' + mode + '"')
self.mode = mode
self.procs = []
self.isRunning = True
self.failExcept = None
if isinstance(cmds[0], str):
cmds = [cmds]
(firstIn, lastOut, otherFh) = self._setupEnds(otherEnd)
for cmd in cmds:
self._createProc(cmd, cmds, firstIn, lastOut)
if otherFh != None:
otherFh.close()
if mode == "r":
self.fh = self.procs[len(self.procs)-1].stdout
else:
self.fh = self.procs[0].stdin
def _setupEnds(self, otherEnd):
if otherEnd != None:
if isinstance(otherEnd, str):
otherFh = file(otherEnd, self.mode)
else:
otherFh = otherEnd
if self.mode == "r":
firstIn = otherFh
else:
lastOut = otherFh
else:
otherFh = None
if self.mode == "r":
firstIn = 0
else:
lastOut = 1
if self.mode == "r":
lastOut = subprocess.PIPE
else:
firstIn = subprocess.PIPE
return (firstIn, lastOut, otherFh)
def _createProc(self, cmd, cmds, firstIn, lastOut):
if (cmd == cmds[0]):
stdin = firstIn
else:
stdin = self.procs[len(self.procs)-1].stdout
if (cmd == cmds[len(cmds)-1]):
stdout = lastOut
else:
stdout = subprocess.PIPE
p = Proc(cmd, stdin=stdin, stdout=stdout)
self.procs.append(p)
def getDesc(self):
strs = []
for p in self.procs:
strs.append(p.getDesc())
return " | ".join(strs)
def wait(self, noError=False):
self.isRunning = False
if self.mode == 'w':
self.fh.close()
firstFail = None
for p in self.procs:
if p.wait() != 0:
if firstFail == None:
firstFail = p
if self.mode == 'r':
self.fh.close()
if firstFail != None:
self.failExcept = OSError(("process exited with %d: \"%s\" in pipeline \"%s\""
% (firstFail.returncode, firstFail.getDesc(), self.getDesc())))
if not noError:
raise self.failExcept
else:
return firstFail.returncode
else:
return 0
def close(self):
if self.isRunning:
self.wait()
if self.failExcept != None:
raise failExcept
| true | true |
1c3ce643df3aaffc76fee87b2c0f49e5b6aefc57 | 421 | py | Python | students/K33421/Kustova_Ekaterina/Lr2/Practice2.1-2.3/django_project_kustova/django_project_kustova/wsgi.py | IJustWantToSleep/ITMO_ICT_WebDevelopment_2020-2021 | 90921730362d14ac5e03268baab1a479c39d578d | [
"MIT"
] | null | null | null | students/K33421/Kustova_Ekaterina/Lr2/Practice2.1-2.3/django_project_kustova/django_project_kustova/wsgi.py | IJustWantToSleep/ITMO_ICT_WebDevelopment_2020-2021 | 90921730362d14ac5e03268baab1a479c39d578d | [
"MIT"
] | null | null | null | students/K33421/Kustova_Ekaterina/Lr2/Practice2.1-2.3/django_project_kustova/django_project_kustova/wsgi.py | IJustWantToSleep/ITMO_ICT_WebDevelopment_2020-2021 | 90921730362d14ac5e03268baab1a479c39d578d | [
"MIT"
] | null | null | null | """
WSGI config for django_project_kustova project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_project_kustova.settings')
application = get_wsgi_application()
| 24.764706 | 82 | 0.800475 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_project_kustova.settings')
application = get_wsgi_application()
| true | true |
1c3ce67637ade16a79a9930c155025f80042e440 | 2,815 | py | Python | twitterScraping/get_metadata.py | nmestrada/TrumpTweetGenerator | cf3c4c8abfd747e59ae89869b738a79c001d84d4 | [
"MIT"
] | null | null | null | twitterScraping/get_metadata.py | nmestrada/TrumpTweetGenerator | cf3c4c8abfd747e59ae89869b738a79c001d84d4 | [
"MIT"
] | null | null | null | twitterScraping/get_metadata.py | nmestrada/TrumpTweetGenerator | cf3c4c8abfd747e59ae89869b738a79c001d84d4 | [
"MIT"
] | null | null | null | import tweepy
import json
import math
import glob
import csv
import zipfile
import zlib
from tweepy import TweepError
from time import sleep
# CHANGE THIS TO THE USER YOU WANT
user = 'realdonaldtrump'
with open('api_keys.json') as f:
keys = json.load(f)
auth = tweepy.OAuthHandler(keys['consumer_key'], keys['consumer_secret'])
auth.set_access_token(keys['access_token'], keys['access_token_secret'])
api = tweepy.API(auth)
user = user.lower()
output_file = '{}.json'.format(user)
output_file_short = '{}_short.json'.format(user)
compression = zipfile.ZIP_DEFLATED
with open('all_ids.json') as f:
ids = json.load(f)
print('total ids: {}'.format(len(ids)))
all_data = []
start = 0
end = 100
limit = len(ids)
i = math.ceil(limit / 100)
for go in range(i):
print('currently getting {} - {}'.format(start, end))
sleep(6) # needed to prevent hitting API rate limit
id_batch = ids[start:end]
start += 100
end += 100
tweets = api.statuses_lookup(id_batch, tweet_mode='extended')
for tweet in tweets:
all_data.append(dict(tweet._json))
print('metadata collection complete')
print('creating master json file')
with open(output_file, 'w') as outfile:
json.dump(all_data, outfile)
print('creating ziped master json file')
zf = zipfile.ZipFile('{}.zip'.format(user), mode='w')
zf.write(output_file, compress_type=compression)
zf.close()
results = []
def is_retweet(entry):
return 'retweeted_status' in entry.keys()
def get_source(entry):
if '<' in entry["source"]:
return entry["source"].split('>')[1].split('<')[0]
else:
return entry["source"]
with open(output_file) as json_data:
data = json.load(json_data)
for entry in data:
t = {
"created_at": entry["created_at"],
"text": entry["full_text"],
"in_reply_to_screen_name": entry["in_reply_to_screen_name"],
"retweet_count": entry["retweet_count"],
"favorite_count": entry["favorite_count"],
"source": get_source(entry),
"id_str": entry["id_str"],
"is_retweet": is_retweet(entry)
}
results.append(t)
print('creating minimized json master file')
with open(output_file_short, 'w') as outfile:
json.dump(results, outfile)
with open(output_file_short) as master_file:
data = json.load(master_file)
fields = ["favorite_count", "source", "text", "in_reply_to_screen_name", "is_retweet", "created_at", "retweet_count", "id_str"]
print('creating CSV version of minimized json master file')
f = csv.writer(open('{}.csv'.format(user), 'w'))
f.writerow(fields)
for x in data:
f.writerow([x["favorite_count"], x["source"], x["text"], x["in_reply_to_screen_name"], x["is_retweet"], x["created_at"], x["retweet_count"], x["id_str"]])
| 29.946809 | 162 | 0.66643 | import tweepy
import json
import math
import glob
import csv
import zipfile
import zlib
from tweepy import TweepError
from time import sleep
user = 'realdonaldtrump'
with open('api_keys.json') as f:
keys = json.load(f)
auth = tweepy.OAuthHandler(keys['consumer_key'], keys['consumer_secret'])
auth.set_access_token(keys['access_token'], keys['access_token_secret'])
api = tweepy.API(auth)
user = user.lower()
output_file = '{}.json'.format(user)
output_file_short = '{}_short.json'.format(user)
compression = zipfile.ZIP_DEFLATED
with open('all_ids.json') as f:
ids = json.load(f)
print('total ids: {}'.format(len(ids)))
all_data = []
start = 0
end = 100
limit = len(ids)
i = math.ceil(limit / 100)
for go in range(i):
print('currently getting {} - {}'.format(start, end))
sleep(6)
id_batch = ids[start:end]
start += 100
end += 100
tweets = api.statuses_lookup(id_batch, tweet_mode='extended')
for tweet in tweets:
all_data.append(dict(tweet._json))
print('metadata collection complete')
print('creating master json file')
with open(output_file, 'w') as outfile:
json.dump(all_data, outfile)
print('creating ziped master json file')
zf = zipfile.ZipFile('{}.zip'.format(user), mode='w')
zf.write(output_file, compress_type=compression)
zf.close()
results = []
def is_retweet(entry):
return 'retweeted_status' in entry.keys()
def get_source(entry):
if '<' in entry["source"]:
return entry["source"].split('>')[1].split('<')[0]
else:
return entry["source"]
with open(output_file) as json_data:
data = json.load(json_data)
for entry in data:
t = {
"created_at": entry["created_at"],
"text": entry["full_text"],
"in_reply_to_screen_name": entry["in_reply_to_screen_name"],
"retweet_count": entry["retweet_count"],
"favorite_count": entry["favorite_count"],
"source": get_source(entry),
"id_str": entry["id_str"],
"is_retweet": is_retweet(entry)
}
results.append(t)
print('creating minimized json master file')
with open(output_file_short, 'w') as outfile:
json.dump(results, outfile)
with open(output_file_short) as master_file:
data = json.load(master_file)
fields = ["favorite_count", "source", "text", "in_reply_to_screen_name", "is_retweet", "created_at", "retweet_count", "id_str"]
print('creating CSV version of minimized json master file')
f = csv.writer(open('{}.csv'.format(user), 'w'))
f.writerow(fields)
for x in data:
f.writerow([x["favorite_count"], x["source"], x["text"], x["in_reply_to_screen_name"], x["is_retweet"], x["created_at"], x["retweet_count"], x["id_str"]])
| true | true |
1c3ce68283063859a2a1f1f6739ba3f341778d3c | 15,494 | py | Python | selfdrive/controls/lib/lateral_planner.py | kansakitw/dragonpilotamd | 83295e6746e685b22e218bd0bd943df674e42a81 | [
"MIT"
] | null | null | null | selfdrive/controls/lib/lateral_planner.py | kansakitw/dragonpilotamd | 83295e6746e685b22e218bd0bd943df674e42a81 | [
"MIT"
] | null | null | null | selfdrive/controls/lib/lateral_planner.py | kansakitw/dragonpilotamd | 83295e6746e685b22e218bd0bd943df674e42a81 | [
"MIT"
] | null | null | null | import math
import numpy as np
from common.realtime import sec_since_boot, DT_MDL
from common.numpy_fast import interp
from selfdrive.swaglog import cloudlog
from selfdrive.controls.lib.lateral_mpc import libmpc_py
from selfdrive.controls.lib.drive_helpers import CONTROL_N, MPC_COST_LAT, LAT_MPC_N, CAR_ROTATION_RADIUS
from selfdrive.controls.lib.lane_planner import LanePlanner, TRAJECTORY_SIZE
from selfdrive.config import Conversions as CV
import cereal.messaging as messaging
from cereal import log
# Short aliases for the capnp enums used throughout this module.
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection

# Minimum speed at which a lane change may be initiated (m/s).
LANE_CHANGE_SPEED_MIN = 30 * CV.MPH_TO_MS
# A lane change in progress longer than this is abandoned (s).
LANE_CHANGE_TIME_MAX = 10.0

# (direction -> state -> model desire) lookup table.  Only the
# "starting"/"finishing" states of a concrete left/right lane change map to a
# non-none desire; every other combination yields Desire.none.
_NONE_DESIRE = log.LateralPlan.Desire.none
DESIRES = {
  direction: {
    LaneChangeState.off: _NONE_DESIRE,
    LaneChangeState.preLaneChange: _NONE_DESIRE,
    LaneChangeState.laneChangeStarting: active_desire,
    LaneChangeState.laneChangeFinishing: active_desire,
  }
  for direction, active_desire in (
    (LaneChangeDirection.none, _NONE_DESIRE),
    (LaneChangeDirection.left, log.LateralPlan.Desire.laneChangeLeft),
    (LaneChangeDirection.right, log.LateralPlan.Desire.laneChangeRight),
  )
}
class LateralPlanner():
  def __init__(self, CP, use_lanelines=True, wide_camera=False):
    """Initialize all planner state.

    Args:
      CP: car parameters struct; only ``steerRateCost`` is read here (fed to
        the lateral MPC cost each solve).
      use_lanelines: if True the MPC tracks the lane-line based path,
        otherwise the raw model trajectory ("laneless"); may be overridden
        each frame in ``update`` from dragonConf.
      wide_camera: forwarded to LanePlanner (camera geometry selection).
    """
    self.use_lanelines = use_lanelines
    self.LP = LanePlanner(wide_camera)
    # Rate-limits "MPC solution invalid" cloudlog spam (timestamp of last log).
    self.last_cloudlog_t = 0
    self.steer_rate_cost = CP.steerRateCost
    # Allocates libmpc solution/state buffers and zeroes desired curvature.
    self.setup_mpc()
    # Consecutive frames with a NaN/diverged MPC solution.
    self.solution_invalid_cnt = 0
    # Lane-change state machine (see DESIRES table and the logic in update()).
    self.lane_change_state = LaneChangeState.off
    self.lane_change_direction = LaneChangeDirection.none
    self.lane_change_timer = 0.0
    self.lane_change_ll_prob = 1.0
    self.keep_pulse_timer = 0.0
    self.prev_one_blinker = False
    self.desire = log.LateralPlan.Desire.none
    # Model trajectory buffers, refreshed from modelV2 every update():
    # positions (m), their stds, yaw along the plan (rad) and time indices (s).
    self.path_xyz = np.zeros((TRAJECTORY_SIZE,3))
    self.path_xyz_stds = np.ones((TRAJECTORY_SIZE,3))
    self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))
    self.t_idxs = np.arange(TRAJECTORY_SIZE)
    self.y_pts = np.zeros(TRAJECTORY_SIZE)
    self.d_path_w_lines_xyz = np.zeros((TRAJECTORY_SIZE, 3))
    # dp: auto-lane-change (ALC) bookkeeping and laneless-mode selection.
    self.dp_torque_apply_length = 1.5  # seconds of steering torque applied for ALC
    self.dp_lc_auto_start = 0.  # wall time (sec_since_boot) when ALC should start
    self.dp_lc_auto_start_in = 0.  # remaining time until ALC starts (display only)
    self.dp_lc_auto_torque_end = 0.  # wall time when torque application ends
    self.dp_torque_apply = False  # True while ALC torque should be applied
    self.laneless_mode = 2  # 0=off, 1=on, 2=AUTO (overwritten from dragonConf)
    self.laneless_mode_status = False
    self.laneless_mode_status_buffer = False
  def setup_mpc(self):
    """Initialize the lateral MPC solver and its CFFI state buffers.

    Allocates the C ``log_t``/``state_t`` structs via cffi, zeroes the
    current-state fields the solver reads (x, y, psi, curvature) and resets
    the curvature targets that ``update`` publishes.  Called once from
    ``__init__``.
    """
    self.libmpc = libmpc_py.libmpc
    self.libmpc.init()
    # C structs owned by cffi; cur_state[0] is the single state_t instance.
    self.mpc_solution = libmpc_py.ffi.new("log_t *")
    self.cur_state = libmpc_py.ffi.new("state_t *")
    self.cur_state[0].x = 0.0
    self.cur_state[0].y = 0.0
    self.cur_state[0].psi = 0.0
    self.cur_state[0].curvature = 0.0
    # Outputs consumed by lateral control (and their rate-limited "safe" copies).
    self.desired_curvature = 0.0
    self.safe_desired_curvature = 0.0
    self.desired_curvature_rate = 0.0
    self.safe_desired_curvature_rate = 0.0
def update(self, sm, CP):
self.use_lanelines = not sm['dragonConf'].dpLaneLessModeCtrl
self.laneless_mode = sm['dragonConf'].dpLaneLessMode
v_ego = sm['carState'].vEgo
active = sm['controlsState'].active
measured_curvature = sm['controlsState'].curvature
# self.LP.update_dp_set_offsets(sm['dragonConf'].dpCameraOffset, sm['dragonConf'].dpPathOffset)
md = sm['modelV2']
self.LP.parse_model(sm['modelV2'])
if len(md.position.x) == TRAJECTORY_SIZE and len(md.orientation.x) == TRAJECTORY_SIZE:
self.path_xyz = np.column_stack([md.position.x, md.position.y, md.position.z])
self.t_idxs = np.array(md.position.t)
self.plan_yaw = list(md.orientation.z)
if len(md.orientation.xStd) == TRAJECTORY_SIZE:
self.path_xyz_stds = np.column_stack([md.position.xStd, md.position.yStd, md.position.zStd])
# Lane change logic
one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker
below_lane_change_speed = v_ego < (sm['dragonConf'].dpLcMinMph * CV.MPH_TO_MS)
if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX):
self.lane_change_state = LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
else:
reset = False
if one_blinker:
cur_time = sec_since_boot()
# reach auto lc condition
if not below_lane_change_speed and sm['dragonConf'].dpLateralMode == 2 and v_ego >= (sm['dragonConf'].dpLcAutoMinMph * CV.MPH_TO_MS):
# work out alc start time and torque apply end time
if self.dp_lc_auto_start == 0.:
self.dp_lc_auto_start = cur_time + sm['dragonConf'].dpLcAutoDelay
self.dp_lc_auto_torque_end = self.dp_lc_auto_start + self.dp_torque_apply_length
else:
# work out how long til alc start
# for display only
self.dp_lc_auto_start_in = self.dp_lc_auto_start - cur_time
self.dp_torque_apply = True if self.dp_lc_auto_start < cur_time <= self.dp_lc_auto_torque_end else False
else:
reset = True
# reset all vals
if not active or reset:
self.dp_lc_auto_start = 0.
self.dp_lc_auto_start_in = 0.
self.dp_lc_auto_torque_end = 0.
self.dp_torque_apply = False
# LaneChangeState.off
if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:
self.lane_change_state = LaneChangeState.preLaneChange
self.lane_change_ll_prob = 1.0
# LaneChangeState.preLaneChange
elif self.lane_change_state == LaneChangeState.preLaneChange:
# Set lane change direction
if sm['carState'].leftBlinker:
self.lane_change_direction = LaneChangeDirection.left
elif sm['carState'].rightBlinker:
self.lane_change_direction = LaneChangeDirection.right
else: # If there are no blinkers we will go back to LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
torque_applied = sm['carState'].steeringPressed and \
((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or
(sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right))
blindspot_detected = ((sm['carState'].leftBlindspot and self.lane_change_direction == LaneChangeDirection.left) or
(sm['carState'].rightBlindspot and self.lane_change_direction == LaneChangeDirection.right))
# if human made lane change prior alca, we should stop alca until new blinker (off -> on)
self.dp_lc_auto_start = self.dp_lc_auto_torque_end if torque_applied else self.dp_lc_auto_start
torque_applied = self.dp_torque_apply if self.dp_torque_apply else torque_applied
if not one_blinker or below_lane_change_speed:
self.lane_change_state = LaneChangeState.off
elif torque_applied and not blindspot_detected:
self.lane_change_state = LaneChangeState.laneChangeStarting
# LaneChangeState.laneChangeStarting
elif self.lane_change_state == LaneChangeState.laneChangeStarting:
# fade out over .5s
self.lane_change_ll_prob = max(self.lane_change_ll_prob - 2*DT_MDL, 0.0)
# 98% certainty
lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob
if lane_change_prob < 0.02 and self.lane_change_ll_prob < 0.01:
self.lane_change_state = LaneChangeState.laneChangeFinishing
# LaneChangeState.laneChangeFinishing
elif self.lane_change_state == LaneChangeState.laneChangeFinishing:
# fade in laneline over 1s
self.lane_change_ll_prob = min(self.lane_change_ll_prob + DT_MDL, 1.0)
if one_blinker and self.lane_change_ll_prob > 0.99:
self.lane_change_state = LaneChangeState.preLaneChange
elif self.lane_change_ll_prob > 0.99:
self.lane_change_state = LaneChangeState.off
if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:
self.lane_change_timer = 0.0
else:
self.lane_change_timer += DT_MDL
self.prev_one_blinker = one_blinker
self.desire = DESIRES[self.lane_change_direction][self.lane_change_state]
# Send keep pulse once per second during LaneChangeStart.preLaneChange
if self.lane_change_state in [LaneChangeState.off, LaneChangeState.laneChangeStarting]:
self.keep_pulse_timer = 0.0
elif self.lane_change_state == LaneChangeState.preLaneChange:
self.keep_pulse_timer += DT_MDL
if self.keep_pulse_timer > 1.0:
self.keep_pulse_timer = 0.0
elif self.desire in [log.LateralPlan.Desire.keepLeft, log.LateralPlan.Desire.keepRight]:
self.desire = log.LateralPlan.Desire.none
# Turn off lanes during lane change
if self.desire == log.LateralPlan.Desire.laneChangeRight or self.desire == log.LateralPlan.Desire.laneChangeLeft:
self.LP.lll_prob *= self.lane_change_ll_prob
self.LP.rll_prob *= self.lane_change_ll_prob
self.d_path_w_lines_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)
if self.use_lanelines:
d_path_xyz = self.d_path_w_lines_xyz
self.libmpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)
self.laneless_mode_status = False
elif self.laneless_mode == 0:
d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)
self.libmpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)
self.laneless_mode_status = False
elif self.laneless_mode == 1:
d_path_xyz = self.path_xyz
path_cost = np.clip(abs(self.path_xyz[0,1]/self.path_xyz_stds[0,1]), 0.5, 5.0) * MPC_COST_LAT.PATH
# Heading cost is useful at low speed, otherwise end of plan can be off-heading
heading_cost = interp(v_ego, [5.0, 10.0], [MPC_COST_LAT.HEADING, 0.0])
self.libmpc.set_weights(path_cost, heading_cost, CP.steerRateCost)
self.laneless_mode_status = True
elif self.laneless_mode == 2 and ((self.LP.lll_prob + self.LP.rll_prob)/2 < 0.3) and self.lane_change_state == LaneChangeState.off:
d_path_xyz = self.path_xyz
path_cost = np.clip(abs(self.path_xyz[0,1]/self.path_xyz_stds[0,1]), 0.5, 5.0) * MPC_COST_LAT.PATH
# Heading cost is useful at low speed, otherwise end of plan can be off-heading
heading_cost = interp(v_ego, [5.0, 10.0], [MPC_COST_LAT.HEADING, 0.0])
self.libmpc.set_weights(path_cost, heading_cost, CP.steerRateCost)
self.laneless_mode_status = True
self.laneless_mode_status_buffer = True
elif self.laneless_mode == 2 and ((self.LP.lll_prob + self.LP.rll_prob)/2 > 0.5) and \
self.laneless_mode_status_buffer and self.lane_change_state == LaneChangeState.off:
d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)
self.libmpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)
self.laneless_mode_status = False
self.laneless_mode_status_buffer = False
elif self.laneless_mode == 2 and self.laneless_mode_status_buffer == True and self.lane_change_state == LaneChangeState.off:
d_path_xyz = self.path_xyz
path_cost = np.clip(abs(self.path_xyz[0,1]/self.path_xyz_stds[0,1]), 0.5, 5.0) * MPC_COST_LAT.PATH
# Heading cost is useful at low speed, otherwise end of plan can be off-heading
heading_cost = interp(v_ego, [5.0, 10.0], [MPC_COST_LAT.HEADING, 0.0])
self.libmpc.set_weights(path_cost, heading_cost, CP.steerRateCost)
self.laneless_mode_status = True
else:
d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)
self.libmpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)
self.laneless_mode_status = False
self.laneless_mode_status_buffer = False
y_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(d_path_xyz, axis=1), d_path_xyz[:,1])
heading_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw)
self.y_pts = y_pts
assert len(y_pts) == LAT_MPC_N + 1
assert len(heading_pts) == LAT_MPC_N + 1
# for now CAR_ROTATION_RADIUS is disabled
# to use it, enable it in the MPC
assert abs(CAR_ROTATION_RADIUS) < 1e-3
self.libmpc.run_mpc(self.cur_state, self.mpc_solution,
float(v_ego),
CAR_ROTATION_RADIUS,
list(y_pts),
list(heading_pts))
# init state for next
self.cur_state.x = 0.0
self.cur_state.y = 0.0
self.cur_state.psi = 0.0
self.cur_state.curvature = interp(DT_MDL, self.t_idxs[:LAT_MPC_N + 1], self.mpc_solution.curvature)
# Check for infeasable MPC solution
mpc_nans = any(math.isnan(x) for x in self.mpc_solution.curvature)
t = sec_since_boot()
if mpc_nans:
self.libmpc.init()
self.cur_state.curvature = measured_curvature
if t > self.last_cloudlog_t + 5.0:
self.last_cloudlog_t = t
cloudlog.warning("Lateral mpc - nan: True")
if self.mpc_solution[0].cost > 20000. or mpc_nans: # TODO: find a better way to detect when MPC did not converge
self.solution_invalid_cnt += 1
else:
self.solution_invalid_cnt = 0
def publish(self, sm, pm):
plan_solution_valid = self.solution_invalid_cnt < 2
plan_send = messaging.new_message('lateralPlan')
plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'modelV2', 'dragonConf'])
plan_send.lateralPlan.laneWidth = float(self.LP.lane_width)
plan_send.lateralPlan.dPathPoints = [float(x) for x in self.y_pts]
plan_send.lateralPlan.psis = [float(x) for x in self.mpc_solution.psi[0:CONTROL_N]]
plan_send.lateralPlan.curvatures = [float(x) for x in self.mpc_solution.curvature[0:CONTROL_N]]
plan_send.lateralPlan.curvatureRates = [float(x) for x in self.mpc_solution.curvature_rate[0:CONTROL_N-1]] +[0.0]
plan_send.lateralPlan.lProb = float(self.LP.lll_prob)
plan_send.lateralPlan.rProb = float(self.LP.rll_prob)
plan_send.lateralPlan.dProb = float(self.LP.d_prob)
plan_send.lateralPlan.mpcSolutionValid = bool(plan_solution_valid)
plan_send.lateralPlan.desire = self.desire
plan_send.lateralPlan.laneChangeState = self.lane_change_state
plan_send.lateralPlan.laneChangeDirection = self.lane_change_direction
plan_send.lateralPlan.dpALCAStartIn = self.dp_lc_auto_start_in
plan_send.lateralPlan.dPathWLinesX = [float(x) for x in self.d_path_w_lines_xyz[:, 0]]
plan_send.lateralPlan.dPathWLinesY = [float(y) for y in self.d_path_w_lines_xyz[:, 1]]
plan_send.lateralPlan.dpLaneLessModeStatus = bool(self.laneless_mode_status)
plan_send.lateralPlan.dPathWLinesX = [float(x) for x in self.d_path_w_lines_xyz[:, 0]]
plan_send.lateralPlan.dPathWLinesY = [float(y) for y in self.d_path_w_lines_xyz[:, 1]]
pm.send('lateralPlan', plan_send)
| 48.118012 | 141 | 0.723764 | import math
import numpy as np
from common.realtime import sec_since_boot, DT_MDL
from common.numpy_fast import interp
from selfdrive.swaglog import cloudlog
from selfdrive.controls.lib.lateral_mpc import libmpc_py
from selfdrive.controls.lib.drive_helpers import CONTROL_N, MPC_COST_LAT, LAT_MPC_N, CAR_ROTATION_RADIUS
from selfdrive.controls.lib.lane_planner import LanePlanner, TRAJECTORY_SIZE
from selfdrive.config import Conversions as CV
import cereal.messaging as messaging
from cereal import log
LaneChangeState = log.LateralPlan.LaneChangeState
LaneChangeDirection = log.LateralPlan.LaneChangeDirection
LANE_CHANGE_SPEED_MIN = 30 * CV.MPH_TO_MS
LANE_CHANGE_TIME_MAX = 10.
DESIRES = {
LaneChangeDirection.none: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.none,
},
LaneChangeDirection.left: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeLeft,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeLeft,
},
LaneChangeDirection.right: {
LaneChangeState.off: log.LateralPlan.Desire.none,
LaneChangeState.preLaneChange: log.LateralPlan.Desire.none,
LaneChangeState.laneChangeStarting: log.LateralPlan.Desire.laneChangeRight,
LaneChangeState.laneChangeFinishing: log.LateralPlan.Desire.laneChangeRight,
},
}
class LateralPlanner():
def __init__(self, CP, use_lanelines=True, wide_camera=False):
self.use_lanelines = use_lanelines
self.LP = LanePlanner(wide_camera)
self.last_cloudlog_t = 0
self.steer_rate_cost = CP.steerRateCost
self.setup_mpc()
self.solution_invalid_cnt = 0
self.lane_change_state = LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
self.lane_change_timer = 0.0
self.lane_change_ll_prob = 1.0
self.keep_pulse_timer = 0.0
self.prev_one_blinker = False
self.desire = log.LateralPlan.Desire.none
self.path_xyz = np.zeros((TRAJECTORY_SIZE,3))
self.path_xyz_stds = np.ones((TRAJECTORY_SIZE,3))
self.plan_yaw = np.zeros((TRAJECTORY_SIZE,))
self.t_idxs = np.arange(TRAJECTORY_SIZE)
self.y_pts = np.zeros(TRAJECTORY_SIZE)
self.d_path_w_lines_xyz = np.zeros((TRAJECTORY_SIZE, 3))
self.dp_torque_apply_length = 1.5
self.dp_lc_auto_start = 0.
self.dp_lc_auto_start_in = 0.
self.dp_lc_auto_torque_end = 0.
self.dp_torque_apply = False
self.laneless_mode = 2
self.laneless_mode_status = False
self.laneless_mode_status_buffer = False
def setup_mpc(self):
self.libmpc = libmpc_py.libmpc
self.libmpc.init()
self.mpc_solution = libmpc_py.ffi.new("log_t *")
self.cur_state = libmpc_py.ffi.new("state_t *")
self.cur_state[0].x = 0.0
self.cur_state[0].y = 0.0
self.cur_state[0].psi = 0.0
self.cur_state[0].curvature = 0.0
self.desired_curvature = 0.0
self.safe_desired_curvature = 0.0
self.desired_curvature_rate = 0.0
self.safe_desired_curvature_rate = 0.0
def update(self, sm, CP):
self.use_lanelines = not sm['dragonConf'].dpLaneLessModeCtrl
self.laneless_mode = sm['dragonConf'].dpLaneLessMode
v_ego = sm['carState'].vEgo
active = sm['controlsState'].active
measured_curvature = sm['controlsState'].curvature
md = sm['modelV2']
self.LP.parse_model(sm['modelV2'])
if len(md.position.x) == TRAJECTORY_SIZE and len(md.orientation.x) == TRAJECTORY_SIZE:
self.path_xyz = np.column_stack([md.position.x, md.position.y, md.position.z])
self.t_idxs = np.array(md.position.t)
self.plan_yaw = list(md.orientation.z)
if len(md.orientation.xStd) == TRAJECTORY_SIZE:
self.path_xyz_stds = np.column_stack([md.position.xStd, md.position.yStd, md.position.zStd])
one_blinker = sm['carState'].leftBlinker != sm['carState'].rightBlinker
below_lane_change_speed = v_ego < (sm['dragonConf'].dpLcMinMph * CV.MPH_TO_MS)
if (not active) or (self.lane_change_timer > LANE_CHANGE_TIME_MAX):
self.lane_change_state = LaneChangeState.off
self.lane_change_direction = LaneChangeDirection.none
else:
reset = False
if one_blinker:
cur_time = sec_since_boot()
if not below_lane_change_speed and sm['dragonConf'].dpLateralMode == 2 and v_ego >= (sm['dragonConf'].dpLcAutoMinMph * CV.MPH_TO_MS):
if self.dp_lc_auto_start == 0.:
self.dp_lc_auto_start = cur_time + sm['dragonConf'].dpLcAutoDelay
self.dp_lc_auto_torque_end = self.dp_lc_auto_start + self.dp_torque_apply_length
else:
self.dp_lc_auto_start_in = self.dp_lc_auto_start - cur_time
self.dp_torque_apply = True if self.dp_lc_auto_start < cur_time <= self.dp_lc_auto_torque_end else False
else:
reset = True
if not active or reset:
self.dp_lc_auto_start = 0.
self.dp_lc_auto_start_in = 0.
self.dp_lc_auto_torque_end = 0.
self.dp_torque_apply = False
if self.lane_change_state == LaneChangeState.off and one_blinker and not self.prev_one_blinker and not below_lane_change_speed:
self.lane_change_state = LaneChangeState.preLaneChange
self.lane_change_ll_prob = 1.0
elif self.lane_change_state == LaneChangeState.preLaneChange:
if sm['carState'].leftBlinker:
self.lane_change_direction = LaneChangeDirection.left
elif sm['carState'].rightBlinker:
self.lane_change_direction = LaneChangeDirection.right
else:
self.lane_change_direction = LaneChangeDirection.none
torque_applied = sm['carState'].steeringPressed and \
((sm['carState'].steeringTorque > 0 and self.lane_change_direction == LaneChangeDirection.left) or
(sm['carState'].steeringTorque < 0 and self.lane_change_direction == LaneChangeDirection.right))
blindspot_detected = ((sm['carState'].leftBlindspot and self.lane_change_direction == LaneChangeDirection.left) or
(sm['carState'].rightBlindspot and self.lane_change_direction == LaneChangeDirection.right))
self.dp_lc_auto_start = self.dp_lc_auto_torque_end if torque_applied else self.dp_lc_auto_start
torque_applied = self.dp_torque_apply if self.dp_torque_apply else torque_applied
if not one_blinker or below_lane_change_speed:
self.lane_change_state = LaneChangeState.off
elif torque_applied and not blindspot_detected:
self.lane_change_state = LaneChangeState.laneChangeStarting
elif self.lane_change_state == LaneChangeState.laneChangeStarting:
self.lane_change_ll_prob = max(self.lane_change_ll_prob - 2*DT_MDL, 0.0)
lane_change_prob = self.LP.l_lane_change_prob + self.LP.r_lane_change_prob
if lane_change_prob < 0.02 and self.lane_change_ll_prob < 0.01:
self.lane_change_state = LaneChangeState.laneChangeFinishing
elif self.lane_change_state == LaneChangeState.laneChangeFinishing:
self.lane_change_ll_prob = min(self.lane_change_ll_prob + DT_MDL, 1.0)
if one_blinker and self.lane_change_ll_prob > 0.99:
self.lane_change_state = LaneChangeState.preLaneChange
elif self.lane_change_ll_prob > 0.99:
self.lane_change_state = LaneChangeState.off
if self.lane_change_state in [LaneChangeState.off, LaneChangeState.preLaneChange]:
self.lane_change_timer = 0.0
else:
self.lane_change_timer += DT_MDL
self.prev_one_blinker = one_blinker
self.desire = DESIRES[self.lane_change_direction][self.lane_change_state]
if self.lane_change_state in [LaneChangeState.off, LaneChangeState.laneChangeStarting]:
self.keep_pulse_timer = 0.0
elif self.lane_change_state == LaneChangeState.preLaneChange:
self.keep_pulse_timer += DT_MDL
if self.keep_pulse_timer > 1.0:
self.keep_pulse_timer = 0.0
elif self.desire in [log.LateralPlan.Desire.keepLeft, log.LateralPlan.Desire.keepRight]:
self.desire = log.LateralPlan.Desire.none
if self.desire == log.LateralPlan.Desire.laneChangeRight or self.desire == log.LateralPlan.Desire.laneChangeLeft:
self.LP.lll_prob *= self.lane_change_ll_prob
self.LP.rll_prob *= self.lane_change_ll_prob
self.d_path_w_lines_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)
if self.use_lanelines:
d_path_xyz = self.d_path_w_lines_xyz
self.libmpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)
self.laneless_mode_status = False
elif self.laneless_mode == 0:
d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)
self.libmpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)
self.laneless_mode_status = False
elif self.laneless_mode == 1:
d_path_xyz = self.path_xyz
path_cost = np.clip(abs(self.path_xyz[0,1]/self.path_xyz_stds[0,1]), 0.5, 5.0) * MPC_COST_LAT.PATH
heading_cost = interp(v_ego, [5.0, 10.0], [MPC_COST_LAT.HEADING, 0.0])
self.libmpc.set_weights(path_cost, heading_cost, CP.steerRateCost)
self.laneless_mode_status = True
elif self.laneless_mode == 2 and ((self.LP.lll_prob + self.LP.rll_prob)/2 < 0.3) and self.lane_change_state == LaneChangeState.off:
d_path_xyz = self.path_xyz
path_cost = np.clip(abs(self.path_xyz[0,1]/self.path_xyz_stds[0,1]), 0.5, 5.0) * MPC_COST_LAT.PATH
heading_cost = interp(v_ego, [5.0, 10.0], [MPC_COST_LAT.HEADING, 0.0])
self.libmpc.set_weights(path_cost, heading_cost, CP.steerRateCost)
self.laneless_mode_status = True
self.laneless_mode_status_buffer = True
elif self.laneless_mode == 2 and ((self.LP.lll_prob + self.LP.rll_prob)/2 > 0.5) and \
self.laneless_mode_status_buffer and self.lane_change_state == LaneChangeState.off:
d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)
self.libmpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)
self.laneless_mode_status = False
self.laneless_mode_status_buffer = False
elif self.laneless_mode == 2 and self.laneless_mode_status_buffer == True and self.lane_change_state == LaneChangeState.off:
d_path_xyz = self.path_xyz
path_cost = np.clip(abs(self.path_xyz[0,1]/self.path_xyz_stds[0,1]), 0.5, 5.0) * MPC_COST_LAT.PATH
heading_cost = interp(v_ego, [5.0, 10.0], [MPC_COST_LAT.HEADING, 0.0])
self.libmpc.set_weights(path_cost, heading_cost, CP.steerRateCost)
self.laneless_mode_status = True
else:
d_path_xyz = self.LP.get_d_path(v_ego, self.t_idxs, self.path_xyz)
self.libmpc.set_weights(MPC_COST_LAT.PATH, MPC_COST_LAT.HEADING, CP.steerRateCost)
self.laneless_mode_status = False
self.laneless_mode_status_buffer = False
y_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(d_path_xyz, axis=1), d_path_xyz[:,1])
heading_pts = np.interp(v_ego * self.t_idxs[:LAT_MPC_N + 1], np.linalg.norm(self.path_xyz, axis=1), self.plan_yaw)
self.y_pts = y_pts
assert len(y_pts) == LAT_MPC_N + 1
assert len(heading_pts) == LAT_MPC_N + 1
assert abs(CAR_ROTATION_RADIUS) < 1e-3
self.libmpc.run_mpc(self.cur_state, self.mpc_solution,
float(v_ego),
CAR_ROTATION_RADIUS,
list(y_pts),
list(heading_pts))
self.cur_state.x = 0.0
self.cur_state.y = 0.0
self.cur_state.psi = 0.0
self.cur_state.curvature = interp(DT_MDL, self.t_idxs[:LAT_MPC_N + 1], self.mpc_solution.curvature)
mpc_nans = any(math.isnan(x) for x in self.mpc_solution.curvature)
t = sec_since_boot()
if mpc_nans:
self.libmpc.init()
self.cur_state.curvature = measured_curvature
if t > self.last_cloudlog_t + 5.0:
self.last_cloudlog_t = t
cloudlog.warning("Lateral mpc - nan: True")
if self.mpc_solution[0].cost > 20000. or mpc_nans:
self.solution_invalid_cnt += 1
else:
self.solution_invalid_cnt = 0
def publish(self, sm, pm):
plan_solution_valid = self.solution_invalid_cnt < 2
plan_send = messaging.new_message('lateralPlan')
plan_send.valid = sm.all_alive_and_valid(service_list=['carState', 'controlsState', 'modelV2', 'dragonConf'])
plan_send.lateralPlan.laneWidth = float(self.LP.lane_width)
plan_send.lateralPlan.dPathPoints = [float(x) for x in self.y_pts]
plan_send.lateralPlan.psis = [float(x) for x in self.mpc_solution.psi[0:CONTROL_N]]
plan_send.lateralPlan.curvatures = [float(x) for x in self.mpc_solution.curvature[0:CONTROL_N]]
plan_send.lateralPlan.curvatureRates = [float(x) for x in self.mpc_solution.curvature_rate[0:CONTROL_N-1]] +[0.0]
plan_send.lateralPlan.lProb = float(self.LP.lll_prob)
plan_send.lateralPlan.rProb = float(self.LP.rll_prob)
plan_send.lateralPlan.dProb = float(self.LP.d_prob)
plan_send.lateralPlan.mpcSolutionValid = bool(plan_solution_valid)
plan_send.lateralPlan.desire = self.desire
plan_send.lateralPlan.laneChangeState = self.lane_change_state
plan_send.lateralPlan.laneChangeDirection = self.lane_change_direction
plan_send.lateralPlan.dpALCAStartIn = self.dp_lc_auto_start_in
plan_send.lateralPlan.dPathWLinesX = [float(x) for x in self.d_path_w_lines_xyz[:, 0]]
plan_send.lateralPlan.dPathWLinesY = [float(y) for y in self.d_path_w_lines_xyz[:, 1]]
plan_send.lateralPlan.dpLaneLessModeStatus = bool(self.laneless_mode_status)
plan_send.lateralPlan.dPathWLinesX = [float(x) for x in self.d_path_w_lines_xyz[:, 0]]
plan_send.lateralPlan.dPathWLinesY = [float(y) for y in self.d_path_w_lines_xyz[:, 1]]
pm.send('lateralPlan', plan_send)
| true | true |
1c3ce6e8bf1faf14cc83cc10b3fe13b90c30ecf5 | 5,341 | py | Python | archivebox/index/json.py | BlipRanger/ArchiveBox | 6f462b45d7dd6bc5a0d49a3329c592d32c610b9f | [
"MIT"
] | 1 | 2020-11-22T21:26:44.000Z | 2020-11-22T21:26:44.000Z | archivebox/index/json.py | extratone/ArchiveBox | b82737cc4dafca49d42edd3cdfd46cf7d5b7c6c1 | [
"MIT"
] | 5 | 2021-03-30T14:20:46.000Z | 2021-09-22T19:41:14.000Z | archivebox/index/json.py | extratone/ArchiveBox | b82737cc4dafca49d42edd3cdfd46cf7d5b7c6c1 | [
"MIT"
] | null | null | null | __package__ = 'archivebox.index'
import os
import sys
import json as pyjson
from pathlib import Path
from datetime import datetime
from typing import List, Optional, Iterator, Any, Union
from .schema import Link, ArchiveResult
from ..system import atomic_write
from ..util import enforce_types
from ..config import (
VERSION,
OUTPUT_DIR,
FOOTER_INFO,
GIT_SHA,
DEPENDENCIES,
JSON_INDEX_FILENAME,
ARCHIVE_DIR_NAME,
ANSI
)
MAIN_INDEX_HEADER = {
'info': 'This is an index of site data archived by ArchiveBox: The self-hosted web archive.',
'schema': 'archivebox.index.json',
'copyright_info': FOOTER_INFO,
'meta': {
'project': 'ArchiveBox',
'version': VERSION,
'git_sha': GIT_SHA,
'website': 'https://ArchiveBox.io',
'docs': 'https://github.com/ArchiveBox/ArchiveBox/wiki',
'source': 'https://github.com/ArchiveBox/ArchiveBox',
'issues': 'https://github.com/ArchiveBox/ArchiveBox/issues',
'dependencies': DEPENDENCIES,
},
}
### Main Links Index
@enforce_types
def parse_json_main_index(out_dir: Path=OUTPUT_DIR) -> Iterator[Link]:
"""parse an archive index json file and return the list of links"""
index_path = Path(out_dir) / JSON_INDEX_FILENAME
if index_path.exists():
with open(index_path, 'r', encoding='utf-8') as f:
links = pyjson.load(f)['links']
for link_json in links:
try:
yield Link.from_json(link_json)
except KeyError:
try:
detail_index_path = Path(OUTPUT_DIR) / ARCHIVE_DIR_NAME / link_json['timestamp']
yield parse_json_link_details(str(detail_index_path))
except KeyError:
# as a last effort, try to guess the missing values out of existing ones
try:
yield Link.from_json(link_json, guess=True)
except KeyError:
print(" {lightyellow}! Failed to load the index.json from {}".format(detail_index_path, **ANSI))
continue
return ()
@enforce_types
def write_json_main_index(links: List[Link], out_dir: Path=OUTPUT_DIR) -> None:
"""write the json link index to a given path"""
assert isinstance(links, List), 'Links must be a list, not a generator.'
assert not links or isinstance(links[0].history, dict)
assert not links or isinstance(links[0].sources, list)
if links and links[0].history.get('title'):
assert isinstance(links[0].history['title'][0], ArchiveResult)
if links and links[0].sources:
assert isinstance(links[0].sources[0], str)
main_index_json = {
**MAIN_INDEX_HEADER,
'num_links': len(links),
'updated': datetime.now(),
'last_run_cmd': sys.argv,
'links': links,
}
atomic_write(str(Path(out_dir) / JSON_INDEX_FILENAME), main_index_json)
### Link Details Index
@enforce_types
def write_json_link_details(link: Link, out_dir: Optional[str]=None) -> None:
"""write a json file with some info about the link"""
out_dir = out_dir or link.link_dir
path = Path(out_dir) / JSON_INDEX_FILENAME
atomic_write(str(path), link._asdict(extended=True))
@enforce_types
def parse_json_link_details(out_dir: Union[Path, str], guess: Optional[bool]=False) -> Optional[Link]:
"""load the json link index from a given directory"""
existing_index = Path(out_dir) / JSON_INDEX_FILENAME
if existing_index.exists():
with open(existing_index, 'r', encoding='utf-8') as f:
try:
link_json = pyjson.load(f)
return Link.from_json(link_json, guess)
except pyjson.JSONDecodeError:
pass
return None
@enforce_types
def parse_json_links_details(out_dir: Union[Path, str]) -> Iterator[Link]:
"""read through all the archive data folders and return the parsed links"""
for entry in os.scandir(Path(out_dir) / ARCHIVE_DIR_NAME):
if entry.is_dir(follow_symlinks=True):
if (Path(entry.path) / 'index.json').exists():
try:
link = parse_json_link_details(entry.path)
except KeyError:
link = None
if link:
yield link
### Helpers
class ExtendedEncoder(pyjson.JSONEncoder):
"""
Extended json serializer that supports serializing several model
fields and objects
"""
def default(self, obj):
cls_name = obj.__class__.__name__
if hasattr(obj, '_asdict'):
return obj._asdict()
elif isinstance(obj, bytes):
return obj.decode()
elif isinstance(obj, datetime):
return obj.isoformat()
elif isinstance(obj, Exception):
return '{}: {}'.format(obj.__class__.__name__, obj)
elif cls_name in ('dict_items', 'dict_keys', 'dict_values'):
return tuple(obj)
return pyjson.JSONEncoder.default(self, obj)
@enforce_types
def to_json(obj: Any, indent: Optional[int]=4, sort_keys: bool=True, cls=ExtendedEncoder) -> str:
return pyjson.dumps(obj, indent=indent, sort_keys=sort_keys, cls=ExtendedEncoder)
| 32.174699 | 127 | 0.625164 | __package__ = 'archivebox.index'
import os
import sys
import json as pyjson
from pathlib import Path
from datetime import datetime
from typing import List, Optional, Iterator, Any, Union
from .schema import Link, ArchiveResult
from ..system import atomic_write
from ..util import enforce_types
from ..config import (
VERSION,
OUTPUT_DIR,
FOOTER_INFO,
GIT_SHA,
DEPENDENCIES,
JSON_INDEX_FILENAME,
ARCHIVE_DIR_NAME,
ANSI
)
MAIN_INDEX_HEADER = {
'info': 'This is an index of site data archived by ArchiveBox: The self-hosted web archive.',
'schema': 'archivebox.index.json',
'copyright_info': FOOTER_INFO,
'meta': {
'project': 'ArchiveBox',
'version': VERSION,
'git_sha': GIT_SHA,
'website': 'https://ArchiveBox.io',
'docs': 'https://github.com/ArchiveBox/ArchiveBox/wiki',
'source': 'https://github.com/ArchiveBox/ArchiveBox',
'issues': 'https://github.com/ArchiveBox/ArchiveBox/issues',
'dependencies': DEPENDENCIES,
},
}
index(out_dir: Path=OUTPUT_DIR) -> Iterator[Link]:
index_path = Path(out_dir) / JSON_INDEX_FILENAME
if index_path.exists():
with open(index_path, 'r', encoding='utf-8') as f:
links = pyjson.load(f)['links']
for link_json in links:
try:
yield Link.from_json(link_json)
except KeyError:
try:
detail_index_path = Path(OUTPUT_DIR) / ARCHIVE_DIR_NAME / link_json['timestamp']
yield parse_json_link_details(str(detail_index_path))
except KeyError:
try:
yield Link.from_json(link_json, guess=True)
except KeyError:
print(" {lightyellow}! Failed to load the index.json from {}".format(detail_index_path, **ANSI))
continue
return ()
@enforce_types
def write_json_main_index(links: List[Link], out_dir: Path=OUTPUT_DIR) -> None:
assert isinstance(links, List), 'Links must be a list, not a generator.'
assert not links or isinstance(links[0].history, dict)
assert not links or isinstance(links[0].sources, list)
if links and links[0].history.get('title'):
assert isinstance(links[0].history['title'][0], ArchiveResult)
if links and links[0].sources:
assert isinstance(links[0].sources[0], str)
main_index_json = {
**MAIN_INDEX_HEADER,
'num_links': len(links),
'updated': datetime.now(),
'last_run_cmd': sys.argv,
'links': links,
}
atomic_write(str(Path(out_dir) / JSON_INDEX_FILENAME), main_index_json)
ils(link: Link, out_dir: Optional[str]=None) -> None:
out_dir = out_dir or link.link_dir
path = Path(out_dir) / JSON_INDEX_FILENAME
atomic_write(str(path), link._asdict(extended=True))
@enforce_types
def parse_json_link_details(out_dir: Union[Path, str], guess: Optional[bool]=False) -> Optional[Link]:
existing_index = Path(out_dir) / JSON_INDEX_FILENAME
if existing_index.exists():
with open(existing_index, 'r', encoding='utf-8') as f:
try:
link_json = pyjson.load(f)
return Link.from_json(link_json, guess)
except pyjson.JSONDecodeError:
pass
return None
@enforce_types
def parse_json_links_details(out_dir: Union[Path, str]) -> Iterator[Link]:
for entry in os.scandir(Path(out_dir) / ARCHIVE_DIR_NAME):
if entry.is_dir(follow_symlinks=True):
if (Path(entry.path) / 'index.json').exists():
try:
link = parse_json_link_details(entry.path)
except KeyError:
link = None
if link:
yield link
oder(pyjson.JSONEncoder):
def default(self, obj):
cls_name = obj.__class__.__name__
if hasattr(obj, '_asdict'):
return obj._asdict()
elif isinstance(obj, bytes):
return obj.decode()
elif isinstance(obj, datetime):
return obj.isoformat()
elif isinstance(obj, Exception):
return '{}: {}'.format(obj.__class__.__name__, obj)
elif cls_name in ('dict_items', 'dict_keys', 'dict_values'):
return tuple(obj)
return pyjson.JSONEncoder.default(self, obj)
@enforce_types
def to_json(obj: Any, indent: Optional[int]=4, sort_keys: bool=True, cls=ExtendedEncoder) -> str:
    """Serialize ``obj`` to a JSON string, defaulting to ExtendedEncoder.

    Bugfix: pass the caller-supplied ``cls`` through to json.dumps — the
    original hard-coded ``cls=ExtendedEncoder`` in the call, silently
    ignoring any custom encoder a caller provided.
    """
    return pyjson.dumps(obj, indent=indent, sort_keys=sort_keys, cls=cls)
| true | true |
1c3ce7760c9ae2c72d27fa3f5f35f7f24304221e | 1,604 | py | Python | source/utils/message.py | hytalo-bassi/aeternah | 82f21d6f6acb01ce50f036702d8878ca7b3c998d | [
"MIT"
] | 1 | 2021-06-28T15:33:56.000Z | 2021-06-28T15:33:56.000Z | source/utils/message.py | hytalo-bassi/aeternah | 82f21d6f6acb01ce50f036702d8878ca7b3c998d | [
"MIT"
] | null | null | null | source/utils/message.py | hytalo-bassi/aeternah | 82f21d6f6acb01ce50f036702d8878ca7b3c998d | [
"MIT"
] | 2 | 2021-05-25T20:20:42.000Z | 2021-06-28T15:33:58.000Z | from aiogram import types
from source.setup import bot, _langs
from .lang import Lang
class Message:
    """Wrapper around an incoming aiogram message plus per-command options.

    Supported ``opt`` keys (all optional; falsy/missing values normalize to
    defaults):
        is_reply: command must be a reply to another message.
        permissions: chat-member permission flags required of the sender.
        is_group: command is only valid in (super)group chats.
    """

    def __init__(self, message: types.Message, opt: dict):
        self.message: types.Message = message
        # Localized strings chosen from the sender's Telegram language code.
        self.lang = Lang(_langs, message.from_user.language_code)
        # .get() tolerates callers that omit option keys; the original
        # opt[...] lookups raised KeyError for a partially-filled dict.
        self.opt: dict = {
            "is_reply": opt.get('is_reply') or False,
            "permissions": opt.get('permissions') or [],
            "is_group": opt.get('is_group') or False
        }

    def main_text(self):
        """Return the most relevant text: replied-to body, caption, or body."""
        if self.message.reply_to_message:
            return self.message.reply_to_message.text
        if self.message.caption:
            return self.message.caption
        return self.message.text

    async def execute_function(self, callback):
        """Run ``callback(message)`` after validating the configured checks.

        Replies with a localized error (and skips ``callback``) when any
        required condition is unmet.
        """
        chat: types.Chat = await bot.get_chat(self.message.chat.id)
        member: types.ChatMember = await bot.get_chat_member(
            self.message.chat.id,
            self.message.from_user.id
        )
        if self.opt['is_reply'] and not self.message.reply_to_message:
            return await self.message.reply(self.lang._lang["reply_to_message"])
        elif self.opt['is_group'] and chat.type not in ["supergroup", "group"]:
            return await self.message.reply(self.lang._lang["only_groups"])
        elif len(self.opt["permissions"]) != 0 and member['status'] != "creator":
            # Chat creators bypass permission checks entirely.
            for permission in self.opt['permissions']:
                # Falsy covers both False and None; the original's extra
                # ``== None`` comparison was redundant.
                if not member[permission]:
                    return await self.message.reply(self.lang._lang["right_permissions"])
        return await callback(self.message)
from source.setup import bot, _langs
from .lang import Lang
class Message:
def __init__(self, message: types.Message, opt: dict):
self.message: types.Message = message
self.lang = Lang(_langs, message.from_user.language_code)
self.opt: dict = {
"is_reply": opt['is_reply'] or False,
"permissions": opt['permissions'] or [],
"is_group": opt['is_group'] or False
}
def main_text(self):
if self.message.reply_to_message:
return self.message.reply_to_message.text
if self.message.caption:
return self.message.caption
return self.message.text
async def execute_function(self, callback):
chat: types.Chat = await bot.get_chat(self.message.chat.id)
member: types.ChatMember = await bot.get_chat_member(
self.message.chat.id,
self.message.from_user.id
)
if self.opt['is_reply'] and not self.message.reply_to_message:
return await self.message.reply(self.lang._lang["reply_to_message"])
elif self.opt['is_group'] and chat.type not in ["supergroup", "group"]:
return await self.message.reply(self.lang._lang["only_groups"])
elif len(self.opt["permissions"]) != 0 and member['status'] != "creator":
for permission in self.opt['permissions']:
if not member[permission] or member[permission] == None:
return await self.message.reply(self.lang._lang["right_permissions"])
return await callback(self.message) | true | true |
1c3ce780823d0d9210cded58f1d85223e308451a | 1,940 | py | Python | gogotest/gogotest/consqlite.py | meihuno/sandbox_scrapy | e88aba41b278c2a5a7585165ff3656ce8e89942b | [
"MIT"
] | null | null | null | gogotest/gogotest/consqlite.py | meihuno/sandbox_scrapy | e88aba41b278c2a5a7585165ff3656ce8e89942b | [
"MIT"
] | null | null | null | gogotest/gogotest/consqlite.py | meihuno/sandbox_scrapy | e88aba41b278c2a5a7585165ff3656ce8e89942b | [
"MIT"
] | null | null | null | import sqlite3
class DBConnection():
    """Thin wrapper around a SQLite database storing scraped book records.

    Row layout of the ``book`` table: (id, name, url, refurl, title),
    with ``url`` unique.
    """

    def __init__(self, dbpath: str = 'test.db') -> None:
        # Fixed the return annotations: the original declared ``-> any``,
        # which is the builtin *function* ``any``, not a type; __init__ and
        # close() return None.
        self.conn = sqlite3.connect(dbpath)
        self._create_db()

    def _create_db(self) -> None:
        """Create the ``book`` table if it does not exist yet."""
        cursor = self.conn.cursor()
        cursor.execute(
            'CREATE TABLE IF NOT EXISTS book(\
            id INTEGER PRIMARY KEY AUTOINCREMENT, \
            name TEXT NOT NULL, \
            url TEXT UNIQUE NOT NULL, \
            refurl TEXT, \
            title TEXT NOT NULL \
            );')
        # Commit the DDL explicitly so the schema is durable even if the
        # caller never performs a write.
        self.conn.commit()

    def close(self) -> None:
        self.conn.close()

    def save_book(self, item) -> None:
        """Insert ``item`` unless a row with the same URL already exists."""
        if self.find_book(item['url']):
            # Duplicate URL: skip silently (url column is UNIQUE).
            return
        self.conn.execute(
            'INSERT INTO book (name, url, refurl, title) VALUES (?, ?, ?, ?)', (
                item['name'],
                item['url'],
                item['refurl'],
                item['title']
            )
        )
        self.conn.commit()

    def find_book(self, url):
        """Return the first row whose url matches, or None."""
        cursor = self.conn.execute(
            'SELECT * FROM book WHERE url=?',
            (url,)
        )
        return cursor.fetchone()

    def ret_find_book(self, name) -> dict:
        """Return a {url: refurl} mapping of every stored book named ``name``."""
        cursor = self.conn.execute(
            'SELECT * FROM book WHERE name=?',
            (name,)
        )
        # book[2] is url, book[3] is refurl (see class docstring).
        return {book[2]: book[3] for book in cursor.fetchall()}
if __name__ == "__main__":
    # Smoke test: insert two sample rows and read them back by name.
    connection = DBConnection()
    samples = (
        {'name': 'book', 'url': 'http://book1', 'title': 'book1', 'refurl': 'dummy'},
        {'name': 'book', 'url': 'http://book2', 'title': 'book2', 'refurl': 'dummy'},
    )
    for sample in samples:
        connection.save_book(sample)
    rdict = connection.ret_find_book('book')
    connection.close()
| 27.323944 | 87 | 0.487113 | import sqlite3
class DBConnection():
def __init__(self, dbpath='test.db') -> any:
dbname = dbpath
self.conn = sqlite3.connect(dbname)
self._create_db()
def _create_db(self):
cursor = self.conn.cursor()
cursor.execute(
'CREATE TABLE IF NOT EXISTS book(\
id INTEGER PRIMARY KEY AUTOINCREMENT, \
name TEXT NOT NULL, \
url TEXT UNIQUE NOT NULL, \
refurl TEXT, \
title TEXT NOT NULL \
);')
def close(self) -> any:
self.conn.close()
def save_book(self, item):
if self.find_book(item['url']):
return
self.conn.execute(
'INSERT INTO book (name, url, refurl, title) VALUES (?, ?, ?, ?)', (
item['name'],
item['url'],
item['refurl'],
item['title']
)
)
self.conn.commit()
def find_book(self, url):
cursor = self.conn.execute(
'SELECT * FROM book WHERE url=?',
(url,)
)
return cursor.fetchone()
def ret_find_book(self, name):
cursor = self.conn.execute(
'SELECT * FROM book WHERE name=?',
(name,)
)
rdict = {}
books = cursor.fetchall()
for book in books:
rdict[book[2]] = book[3]
return rdict
if __name__ == "__main__":
box = DBConnection()
item1 = {'name':'book', 'url': 'http://book1', 'title': 'book1', 'refurl': 'dummy'}
item2 = {'name':'book', 'url': 'http://book2', 'title': 'book2', 'refurl': 'dummy'}
box.save_book(item1)
box.save_book(item2)
rdict = box.ret_find_book('book')
box.close()
| true | true |
1c3ce82b1e4c7fffc470a4a82c810effcb2667d0 | 1,953 | py | Python | tests/contrib/validation/cerberus/test_validator_cerberus.py | vipulgupta2048/spidermon | b955d15acb5a933c56bc6f52cb34b644a13cf94f | [
"BSD-3-Clause"
] | 1 | 2019-08-04T07:49:34.000Z | 2019-08-04T07:49:34.000Z | tests/contrib/validation/cerberus/test_validator_cerberus.py | vipulgupta2048/spidermon | b955d15acb5a933c56bc6f52cb34b644a13cf94f | [
"BSD-3-Clause"
] | 4 | 2019-06-27T10:40:11.000Z | 2019-08-12T11:40:57.000Z | tests/contrib/validation/cerberus/test_validator_cerberus.py | vipulgupta2048/spidermon | b955d15acb5a933c56bc6f52cb34b644a13cf94f | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import pytest
from cerberus.validator import DocumentError, SchemaError
from spidermon.contrib.validation import CerberusValidator
@pytest.mark.parametrize(
    "data,schema",
    [
        pytest.param(
            {"name": "foo", "number": 5},
            {"name": {"typestring"}, "number": {"type": "integer"}},
            id="Schema Invalid",
        ),
        pytest.param(
            ["This is wrong"],
            {"name": {"type": "string"}, "number": {"type": "integer"}},
            id="Data invalid, not mapping type",
        ),
        pytest.param(
            ["foo"],
            {"name": {"x": "boo"}, "number": {"type": "integer"}},
            id="Both schema, and data invalid",
        ),
    ],
)
def test_raise_value_error_with_invalid_schemas(data, schema):
    """Malformed schemas or non-mapping documents must surface as ValueError."""
    with pytest.raises(ValueError):
        CerberusValidator(schema).validate(data)
@pytest.mark.parametrize(
    "data,schema", [pytest.param(None, {"name": {"type": "string"}}, id="Missing Data")]
)
def test_document_error_with_missing_data(data, schema):
    """A None document raises cerberus DocumentError, not a validation failure."""
    validator = CerberusValidator(schema)
    with pytest.raises(DocumentError):
        validator.validate(data)
@pytest.mark.parametrize(
    "data,schema",
    [pytest.param({"name": "foo", "number": 5}, None, id="Missing Schema")],
)
def test_schema_error_with_missing_schemas(data, schema):
    """Constructing a validator without a schema raises SchemaError."""
    # Callable form of pytest.raises: fails the test unless SchemaError is raised.
    pytest.raises(SchemaError, CerberusValidator, schema)
@pytest.mark.parametrize(
    "data,schema,valid,expected_errors",
    [
        pytest.param(
            {"name": "foo", "number": 5},
            {"name": {"type": "string"}, "number": {"type": "integer"}},
            True,
            {},
            id="Valid schema, data",
        )
    ],
)
def test_valid_schemas(data, schema, valid, expected_errors):
    """A conforming document validates cleanly with no error details."""
    result = CerberusValidator(schema).validate(data)
    assert result == (valid, expected_errors)
| 29.590909 | 88 | 0.600102 | from __future__ import absolute_import
import pytest
from cerberus.validator import DocumentError, SchemaError
from spidermon.contrib.validation import CerberusValidator
@pytest.mark.parametrize(
"data,schema",
[
pytest.param(
{"name": "foo", "number": 5},
{"name": {"typestring"}, "number": {"type": "integer"}},
id="Schema Invalid",
),
pytest.param(
["This is wrong"],
{"name": {"type": "string"}, "number": {"type": "integer"}},
id="Data invalid, not mapping type",
),
pytest.param(
["foo"],
{"name": {"x": "boo"}, "number": {"type": "integer"}},
id="Both schema, and data invalid",
),
],
)
def test_raise_value_error_with_invalid_schemas(data, schema):
validator = CerberusValidator(schema)
with pytest.raises(ValueError):
validator.validate(data)
@pytest.mark.parametrize(
"data,schema", [pytest.param(None, {"name": {"type": "string"}}, id="Missing Data")]
)
def test_document_error_with_missing_data(data, schema):
validator = CerberusValidator(schema)
with pytest.raises(DocumentError):
validator.validate(data)
@pytest.mark.parametrize(
"data,schema",
[pytest.param({"name": "foo", "number": 5}, None, id="Missing Schema")],
)
def test_schema_error_with_missing_schemas(data, schema):
with pytest.raises(SchemaError):
CerberusValidator(schema)
@pytest.mark.parametrize(
"data,schema,valid,expected_errors",
[
pytest.param(
{"name": "foo", "number": 5},
{"name": {"type": "string"}, "number": {"type": "integer"}},
True,
{},
id="Valid schema, data",
)
],
)
def test_valid_schemas(data, schema, valid, expected_errors):
validator = CerberusValidator(schema)
assert validator.validate(data) == (valid, expected_errors)
| true | true |
1c3ce8aa5069004e6ac17312a43a74bb0553e4b0 | 1,261 | py | Python | icebrk/fasthistos.py | bainbrid/icenet | 0b261dc97451fd7f896ed27f2b90dd2668e635ca | [
"MIT"
] | null | null | null | icebrk/fasthistos.py | bainbrid/icenet | 0b261dc97451fd7f896ed27f2b90dd2668e635ca | [
"MIT"
] | null | null | null | icebrk/fasthistos.py | bainbrid/icenet | 0b261dc97451fd7f896ed27f2b90dd2668e635ca | [
"MIT"
] | null | null | null | # Raw "fast" observable containers for B/RK analyzer
#
#
# Mikael Mieskolainen, 2020
# m.mieskolainen@imperial.ac.uk
import bz2
import copy
import numpy as np
import iceplot
import icebrk.tools as tools
# Template configuration shared by every fast observable; deep-copied and
# customized per histogram in initialize().
obs_template = {
    # Plot axes (None = autoscale; labels filled in per observable)
    'xlim': None,
    'ylim': None,
    'xlabel': r'',
    'ylabel': r'Counts',
    'units': r'',
    'label': r'',
    'figsize': (4, 4),
    # Histogram binning
    'bins': iceplot.stepspace(0.0, 10.0, 0.1),
    'density': False,
    # Observable-computation callback (assigned later)
    'func': None,
    # Whether to persist results to disk
    'pickle': False,
}

# Fast triplet histogram definitions: uniform binning spec per branch name.
fasthist = {
    'BToKEE_l1_isPF': {'xmin': 0, 'xmax': 2, 'nbins': 2},
    'BToKEE_l2_isPF': {'xmin': 0, 'xmax': 2, 'nbins': 2},
}
def initialize():
    """Build empty fast-histogram observable dicts for signal and background.

    Returns:
        dict keyed by 'S' (signal) and 'B' (background); each value maps a
        histogram name from ``fasthist`` to its own observable config dict.
    """
    hobj = {'S': {}, 'B': {}}
    for mode in hobj:
        for key, spec in fasthist.items():
            # Each entry gets an independent copy of the template so later
            # per-histogram mutations cannot leak across observables.
            obs = copy.deepcopy(obs_template)
            obs['xlabel'] = key
            obs['bins'] = np.linspace(spec['xmin'], spec['xmax'], spec['nbins'])
            hobj[mode][key] = obs
    return hobj
| 18.544118 | 111 | 0.57732 |
import bz2
import copy
import numpy as np
import iceplot
import icebrk.tools as tools
obs_template = {
'xlim' : None,
'ylim' : None,
'xlabel' : r'',
'ylabel' : r'Counts',
'units' : r'',
'label' : r'',
'figsize' : (4,4),
'bins' : iceplot.stepspace(0.0, 10.0, 0.1),
'density' : False,
'func' : None,
'pickle' : False
}
fasthist = {
'BToKEE_l1_isPF': {'xmin': 0, 'xmax': 2, 'nbins': 2},
'BToKEE_l2_isPF': {'xmin': 0, 'xmax': 2, 'nbins': 2}
}
def initialize():
hobj = {'S': dict(), 'B': dict()}
for mode in hobj.keys():
for key in fasthist.keys():
obs = copy.deepcopy(obs_template)
obs['xlabel'] = key
obs['bins'] = np.linspace(fasthist[key]['xmin'], fasthist[key]['xmax'], fasthist[key]['nbins'])
hobj[mode][key] = copy.deepcopy(obs)
return hobj
| true | true |
1c3ce94bb65cdb4ca0f7118303a99a2ad1845a8d | 16,866 | py | Python | src/oci/optimizer/models/profile_summary.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | 3 | 2020-09-10T22:09:45.000Z | 2021-12-24T17:00:07.000Z | src/oci/optimizer/models/profile_summary.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/optimizer/models/profile_summary.py | ezequielramos/oci-python-sdk | cc4235cf217beaf9feed75760e9ce82610222762 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ProfileSummary(object):
"""
The metadata associated with the profile summary.
"""
#: A constant which can be used with the lifecycle_state property of a ProfileSummary.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a ProfileSummary.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
#: A constant which can be used with the lifecycle_state property of a ProfileSummary.
#: This constant has a value of "INACTIVE"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
#: A constant which can be used with the lifecycle_state property of a ProfileSummary.
#: This constant has a value of "ATTACHING"
LIFECYCLE_STATE_ATTACHING = "ATTACHING"
#: A constant which can be used with the lifecycle_state property of a ProfileSummary.
#: This constant has a value of "DETACHING"
LIFECYCLE_STATE_DETACHING = "DETACHING"
#: A constant which can be used with the lifecycle_state property of a ProfileSummary.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a ProfileSummary.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED = "DELETED"
#: A constant which can be used with the lifecycle_state property of a ProfileSummary.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a ProfileSummary.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
def __init__(self, **kwargs):
"""
Initializes a new ProfileSummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this ProfileSummary.
:type id: str
:param compartment_id:
The value to assign to the compartment_id property of this ProfileSummary.
:type compartment_id: str
:param name:
The value to assign to the name property of this ProfileSummary.
:type name: str
:param description:
The value to assign to the description property of this ProfileSummary.
:type description: str
:param aggregation_interval_in_days:
The value to assign to the aggregation_interval_in_days property of this ProfileSummary.
:type aggregation_interval_in_days: int
:param defined_tags:
The value to assign to the defined_tags property of this ProfileSummary.
:type defined_tags: dict(str, dict(str, object))
:param freeform_tags:
The value to assign to the freeform_tags property of this ProfileSummary.
:type freeform_tags: dict(str, str)
:param lifecycle_state:
The value to assign to the lifecycle_state property of this ProfileSummary.
Allowed values for this property are: "ACTIVE", "FAILED", "INACTIVE", "ATTACHING", "DETACHING", "DELETING", "DELETED", "UPDATING", "CREATING", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param levels_configuration:
The value to assign to the levels_configuration property of this ProfileSummary.
:type levels_configuration: oci.optimizer.models.LevelsConfiguration
:param target_compartments:
The value to assign to the target_compartments property of this ProfileSummary.
:type target_compartments: oci.optimizer.models.TargetCompartments
:param target_tags:
The value to assign to the target_tags property of this ProfileSummary.
:type target_tags: oci.optimizer.models.TargetTags
:param time_created:
The value to assign to the time_created property of this ProfileSummary.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this ProfileSummary.
:type time_updated: datetime
"""
self.swagger_types = {
'id': 'str',
'compartment_id': 'str',
'name': 'str',
'description': 'str',
'aggregation_interval_in_days': 'int',
'defined_tags': 'dict(str, dict(str, object))',
'freeform_tags': 'dict(str, str)',
'lifecycle_state': 'str',
'levels_configuration': 'LevelsConfiguration',
'target_compartments': 'TargetCompartments',
'target_tags': 'TargetTags',
'time_created': 'datetime',
'time_updated': 'datetime'
}
self.attribute_map = {
'id': 'id',
'compartment_id': 'compartmentId',
'name': 'name',
'description': 'description',
'aggregation_interval_in_days': 'aggregationIntervalInDays',
'defined_tags': 'definedTags',
'freeform_tags': 'freeformTags',
'lifecycle_state': 'lifecycleState',
'levels_configuration': 'levelsConfiguration',
'target_compartments': 'targetCompartments',
'target_tags': 'targetTags',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated'
}
self._id = None
self._compartment_id = None
self._name = None
self._description = None
self._aggregation_interval_in_days = None
self._defined_tags = None
self._freeform_tags = None
self._lifecycle_state = None
self._levels_configuration = None
self._target_compartments = None
self._target_tags = None
self._time_created = None
self._time_updated = None
@property
def id(self):
"""
**[Required]** Gets the id of this ProfileSummary.
The unique OCID of the profile.
:return: The id of this ProfileSummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this ProfileSummary.
The unique OCID of the profile.
:param id: The id of this ProfileSummary.
:type: str
"""
self._id = id
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this ProfileSummary.
The OCID of the tenancy. The tenancy is the root compartment.
:return: The compartment_id of this ProfileSummary.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this ProfileSummary.
The OCID of the tenancy. The tenancy is the root compartment.
:param compartment_id: The compartment_id of this ProfileSummary.
:type: str
"""
self._compartment_id = compartment_id
@property
def name(self):
"""
**[Required]** Gets the name of this ProfileSummary.
The name assigned to the profile.
:return: The name of this ProfileSummary.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this ProfileSummary.
The name assigned to the profile.
:param name: The name of this ProfileSummary.
:type: str
"""
self._name = name
@property
def description(self):
"""
**[Required]** Gets the description of this ProfileSummary.
Text describing the profile.
:return: The description of this ProfileSummary.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""
Sets the description of this ProfileSummary.
Text describing the profile.
:param description: The description of this ProfileSummary.
:type: str
"""
self._description = description
@property
def aggregation_interval_in_days(self):
"""
Gets the aggregation_interval_in_days of this ProfileSummary.
The time period over which to collect data for the recommendations, measured in number of days.
:return: The aggregation_interval_in_days of this ProfileSummary.
:rtype: int
"""
return self._aggregation_interval_in_days
@aggregation_interval_in_days.setter
def aggregation_interval_in_days(self, aggregation_interval_in_days):
"""
Sets the aggregation_interval_in_days of this ProfileSummary.
The time period over which to collect data for the recommendations, measured in number of days.
:param aggregation_interval_in_days: The aggregation_interval_in_days of this ProfileSummary.
:type: int
"""
self._aggregation_interval_in_days = aggregation_interval_in_days
@property
def defined_tags(self):
"""
Gets the defined_tags of this ProfileSummary.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this ProfileSummary.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this ProfileSummary.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this ProfileSummary.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this ProfileSummary.
Simple key-value pair applied without any predefined name, type, or namespace.
For more information, see `Resource Tags`__. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this ProfileSummary.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this ProfileSummary.
Simple key-value pair applied without any predefined name, type, or namespace.
For more information, see `Resource Tags`__. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this ProfileSummary.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def lifecycle_state(self):
"""
**[Required]** Gets the lifecycle_state of this ProfileSummary.
The profile's current state.
Allowed values for this property are: "ACTIVE", "FAILED", "INACTIVE", "ATTACHING", "DETACHING", "DELETING", "DELETED", "UPDATING", "CREATING", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this ProfileSummary.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this ProfileSummary.
The profile's current state.
:param lifecycle_state: The lifecycle_state of this ProfileSummary.
:type: str
"""
allowed_values = ["ACTIVE", "FAILED", "INACTIVE", "ATTACHING", "DETACHING", "DELETING", "DELETED", "UPDATING", "CREATING"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def levels_configuration(self):
"""
Gets the levels_configuration of this ProfileSummary.
:return: The levels_configuration of this ProfileSummary.
:rtype: oci.optimizer.models.LevelsConfiguration
"""
return self._levels_configuration
@levels_configuration.setter
def levels_configuration(self, levels_configuration):
"""
Sets the levels_configuration of this ProfileSummary.
:param levels_configuration: The levels_configuration of this ProfileSummary.
:type: oci.optimizer.models.LevelsConfiguration
"""
self._levels_configuration = levels_configuration
@property
def target_compartments(self):
"""
Gets the target_compartments of this ProfileSummary.
:return: The target_compartments of this ProfileSummary.
:rtype: oci.optimizer.models.TargetCompartments
"""
return self._target_compartments
@target_compartments.setter
def target_compartments(self, target_compartments):
"""
Sets the target_compartments of this ProfileSummary.
:param target_compartments: The target_compartments of this ProfileSummary.
:type: oci.optimizer.models.TargetCompartments
"""
self._target_compartments = target_compartments
@property
def target_tags(self):
"""
Gets the target_tags of this ProfileSummary.
:return: The target_tags of this ProfileSummary.
:rtype: oci.optimizer.models.TargetTags
"""
return self._target_tags
@target_tags.setter
def target_tags(self, target_tags):
"""
Sets the target_tags of this ProfileSummary.
:param target_tags: The target_tags of this ProfileSummary.
:type: oci.optimizer.models.TargetTags
"""
self._target_tags = target_tags
@property
def time_created(self):
"""
**[Required]** Gets the time_created of this ProfileSummary.
The date and time the profile was created, in the format defined by RFC3339.
:return: The time_created of this ProfileSummary.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this ProfileSummary.
The date and time the profile was created, in the format defined by RFC3339.
:param time_created: The time_created of this ProfileSummary.
:type: datetime
"""
self._time_created = time_created
@property
def time_updated(self):
"""
**[Required]** Gets the time_updated of this ProfileSummary.
The date and time the profile was last updated, in the format defined by RFC3339.
:return: The time_updated of this ProfileSummary.
:rtype: datetime
"""
return self._time_updated
@time_updated.setter
def time_updated(self, time_updated):
"""
Sets the time_updated of this ProfileSummary.
The date and time the profile was last updated, in the format defined by RFC3339.
:param time_updated: The time_updated of this ProfileSummary.
:type: datetime
"""
self._time_updated = time_updated
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 34.072727 | 245 | 0.655461 |
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ProfileSummary(object):
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
LIFECYCLE_STATE_FAILED = "FAILED"
LIFECYCLE_STATE_INACTIVE = "INACTIVE"
LIFECYCLE_STATE_ATTACHING = "ATTACHING"
LIFECYCLE_STATE_DETACHING = "DETACHING"
LIFECYCLE_STATE_DELETING = "DELETING"
LIFECYCLE_STATE_DELETED = "DELETED"
LIFECYCLE_STATE_UPDATING = "UPDATING"
LIFECYCLE_STATE_CREATING = "CREATING"
def __init__(self, **kwargs):
self.swagger_types = {
'id': 'str',
'compartment_id': 'str',
'name': 'str',
'description': 'str',
'aggregation_interval_in_days': 'int',
'defined_tags': 'dict(str, dict(str, object))',
'freeform_tags': 'dict(str, str)',
'lifecycle_state': 'str',
'levels_configuration': 'LevelsConfiguration',
'target_compartments': 'TargetCompartments',
'target_tags': 'TargetTags',
'time_created': 'datetime',
'time_updated': 'datetime'
}
self.attribute_map = {
'id': 'id',
'compartment_id': 'compartmentId',
'name': 'name',
'description': 'description',
'aggregation_interval_in_days': 'aggregationIntervalInDays',
'defined_tags': 'definedTags',
'freeform_tags': 'freeformTags',
'lifecycle_state': 'lifecycleState',
'levels_configuration': 'levelsConfiguration',
'target_compartments': 'targetCompartments',
'target_tags': 'targetTags',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated'
}
self._id = None
self._compartment_id = None
self._name = None
self._description = None
self._aggregation_interval_in_days = None
self._defined_tags = None
self._freeform_tags = None
self._lifecycle_state = None
self._levels_configuration = None
self._target_compartments = None
self._target_tags = None
self._time_created = None
self._time_updated = None
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def compartment_id(self):
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
self._compartment_id = compartment_id
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def description(self):
return self._description
@description.setter
def description(self, description):
self._description = description
@property
def aggregation_interval_in_days(self):
return self._aggregation_interval_in_days
@aggregation_interval_in_days.setter
def aggregation_interval_in_days(self, aggregation_interval_in_days):
self._aggregation_interval_in_days = aggregation_interval_in_days
@property
def defined_tags(self):
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
self._defined_tags = defined_tags
@property
def freeform_tags(self):
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
self._freeform_tags = freeform_tags
@property
def lifecycle_state(self):
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
allowed_values = ["ACTIVE", "FAILED", "INACTIVE", "ATTACHING", "DETACHING", "DELETING", "DELETED", "UPDATING", "CREATING"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def levels_configuration(self):
return self._levels_configuration
@levels_configuration.setter
def levels_configuration(self, levels_configuration):
self._levels_configuration = levels_configuration
@property
def target_compartments(self):
return self._target_compartments
@target_compartments.setter
def target_compartments(self, target_compartments):
self._target_compartments = target_compartments
@property
def target_tags(self):
return self._target_tags
@target_tags.setter
def target_tags(self, target_tags):
self._target_tags = target_tags
@property
def time_created(self):
return self._time_created
@time_created.setter
def time_created(self, time_created):
self._time_created = time_created
@property
def time_updated(self):
return self._time_updated
@time_updated.setter
def time_updated(self, time_updated):
self._time_updated = time_updated
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of __eq__ (must be defined explicitly on Python 2)."""
        return not self == other
| true | true |
1c3ce970d1f1279124b396bc1ce1b5f8a9040cfb | 38,653 | py | Python | test/functional/s3api/test_multi_upload.py | gyaozhou/swift-read | 16fe18ae3be59a095f3bafdd69fe74b48a2771cb | [
"Apache-2.0"
] | null | null | null | test/functional/s3api/test_multi_upload.py | gyaozhou/swift-read | 16fe18ae3be59a095f3bafdd69fe74b48a2771cb | [
"Apache-2.0"
] | null | null | null | test/functional/s3api/test_multi_upload.py | gyaozhou/swift-read | 16fe18ae3be59a095f3bafdd69fe74b48a2771cb | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import unittest2
import os
import boto
# For an issue with venv and distutils, disable pylint message here
# pylint: disable-msg=E0611,F0401
from distutils.version import StrictVersion
from hashlib import md5
from itertools import izip, izip_longest
import test.functional as tf
from swift.common.middleware.s3api.etree import fromstring, tostring, Element, \
SubElement
from swift.common.middleware.s3api.utils import mktime
from test.functional.s3api import S3ApiBase
from test.functional.s3api.s3_test_client import Connection
from test.functional.s3api.utils import get_error_code, get_error_msg
def setUpModule():
    # Initialize the functional-test package (cluster config, clients).
    tf.setup_package()
def tearDownModule():
    # Release whatever setup_package created.
    tf.teardown_package()
class TestS3ApiMultiUpload(S3ApiBase):
def setUp(self):
super(TestS3ApiMultiUpload, self).setUp()
if not tf.cluster_info['s3api'].get('allow_multipart_uploads', False):
raise tf.SkipTest('multipart upload is not enebled')
self.min_segment_size = int(tf.cluster_info['s3api'].get(
'min_segment_size', 5242880))
def _gen_comp_xml(self, etags):
elem = Element('CompleteMultipartUpload')
for i, etag in enumerate(etags):
elem_part = SubElement(elem, 'Part')
SubElement(elem_part, 'PartNumber').text = str(i + 1)
SubElement(elem_part, 'ETag').text = etag
return tostring(elem)
def _initiate_multi_uploads_result_generator(self, bucket, keys,
headers=None, trials=1):
if headers is None:
headers = [None] * len(keys)
self.conn.make_request('PUT', bucket)
query = 'uploads'
for key, key_headers in izip_longest(keys, headers):
for i in xrange(trials):
status, resp_headers, body = \
self.conn.make_request('POST', bucket, key,
headers=key_headers, query=query)
yield status, resp_headers, body
def _upload_part(self, bucket, key, upload_id, content=None, part_num=1):
query = 'partNumber=%s&uploadId=%s' % (part_num, upload_id)
content = content if content else 'a' * self.min_segment_size
status, headers, body = \
self.conn.make_request('PUT', bucket, key, body=content,
query=query)
return status, headers, body
def _upload_part_copy(self, src_bucket, src_obj, dst_bucket, dst_key,
upload_id, part_num=1, src_range=None):
src_path = '%s/%s' % (src_bucket, src_obj)
query = 'partNumber=%s&uploadId=%s' % (part_num, upload_id)
req_headers = {'X-Amz-Copy-Source': src_path}
if src_range:
req_headers['X-Amz-Copy-Source-Range'] = src_range
status, headers, body = \
self.conn.make_request('PUT', dst_bucket, dst_key,
headers=req_headers,
query=query)
elem = fromstring(body, 'CopyPartResult')
etag = elem.find('ETag').text.strip('"')
return status, headers, body, etag
def _complete_multi_upload(self, bucket, key, upload_id, xml):
query = 'uploadId=%s' % upload_id
status, headers, body = \
self.conn.make_request('POST', bucket, key, body=xml,
query=query)
return status, headers, body
    def test_object_multi_upload(self):
        """End-to-end happy path for the multipart upload API.

        Exercises initiate, list-uploads, upload-part, upload-part-copy,
        list-parts, abort and complete, then verifies the assembled
        object via conditional HEAD requests and a bucket listing.
        """
        bucket = 'bucket'
        keys = ['obj1', 'obj2', 'obj3']
        # Extra request headers per key: the bogus Content-MD5 / Etag on
        # keys 2 and 3 must be ignored by the initiate request.
        headers = [None,
                   {'Content-MD5': base64.b64encode('a' * 16).strip()},
                   {'Etag': 'nonsense'}]
        uploads = []
        results_generator = self._initiate_multi_uploads_result_generator(
            bucket, keys, headers=headers)
        # Initiate Multipart Upload
        for expected_key, (status, headers, body) in \
                izip(keys, results_generator):
            self.assertEqual(status, 200)
            self.assertCommonResponseHeaders(headers)
            self.assertTrue('content-type' in headers)
            self.assertEqual(headers['content-type'], 'application/xml')
            self.assertTrue('content-length' in headers)
            self.assertEqual(headers['content-length'], str(len(body)))
            elem = fromstring(body, 'InitiateMultipartUploadResult')
            self.assertEqual(elem.find('Bucket').text, bucket)
            key = elem.find('Key').text
            self.assertEqual(expected_key, key)
            upload_id = elem.find('UploadId').text
            self.assertTrue(upload_id is not None)
            self.assertTrue((key, upload_id) not in uploads)
            uploads.append((key, upload_id))
        self.assertEqual(len(uploads), len(keys))  # sanity
        # List Multipart Uploads
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('GET', bucket, query=query)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        elem = fromstring(body, 'ListMultipartUploadsResult')
        self.assertEqual(elem.find('Bucket').text, bucket)
        self.assertIsNone(elem.find('KeyMarker').text)
        self.assertEqual(elem.find('NextKeyMarker').text, uploads[-1][0])
        self.assertIsNone(elem.find('UploadIdMarker').text)
        self.assertEqual(elem.find('NextUploadIdMarker').text, uploads[-1][1])
        self.assertEqual(elem.find('MaxUploads').text, '1000')
        self.assertTrue(elem.find('EncodingType') is None)
        self.assertEqual(elem.find('IsTruncated').text, 'false')
        self.assertEqual(len(elem.findall('Upload')), 3)
        for (expected_key, expected_upload_id), u in \
                izip(uploads, elem.findall('Upload')):
            key = u.find('Key').text
            upload_id = u.find('UploadId').text
            self.assertEqual(expected_key, key)
            self.assertEqual(expected_upload_id, upload_id)
            self.assertEqual(u.find('Initiator/ID').text,
                             self.conn.user_id)
            self.assertEqual(u.find('Initiator/DisplayName').text,
                             self.conn.user_id)
            self.assertEqual(u.find('Owner/ID').text, self.conn.user_id)
            self.assertEqual(u.find('Owner/DisplayName').text,
                             self.conn.user_id)
            self.assertEqual(u.find('StorageClass').text, 'STANDARD')
            self.assertTrue(u.find('Initiated').text is not None)
        # Upload Part
        key, upload_id = uploads[0]
        content = 'a' * self.min_segment_size
        etag = md5(content).hexdigest()
        status, headers, body = \
            self._upload_part(bucket, key, upload_id, content)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers, etag)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], '0')
        expected_parts_list = [(headers['etag'], mktime(headers['date']))]
        # Upload Part Copy
        key, upload_id = uploads[1]
        src_bucket = 'bucket2'
        src_obj = 'obj3'
        src_content = 'b' * self.min_segment_size
        etag = md5(src_content).hexdigest()
        # prepare src obj
        self.conn.make_request('PUT', src_bucket)
        self.conn.make_request('PUT', src_bucket, src_obj, body=src_content)
        _, headers, _ = self.conn.make_request('HEAD', src_bucket, src_obj)
        self.assertCommonResponseHeaders(headers)
        status, headers, body, resp_etag = \
            self._upload_part_copy(src_bucket, src_obj, bucket,
                                   key, upload_id)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        self.assertTrue('etag' not in headers)
        elem = fromstring(body, 'CopyPartResult')
        last_modified = elem.find('LastModified').text
        self.assertTrue(last_modified is not None)
        self.assertEqual(resp_etag, etag)
        # Check last-modified timestamp
        key, upload_id = uploads[1]
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('GET', bucket, key, query=query)
        self.assertEqual(200, status)
        elem = fromstring(body, 'ListPartsResult')
        # FIXME: COPY result drops milli/microseconds but GET doesn't
        last_modified_gets = [p.find('LastModified').text
                              for p in elem.iterfind('Part')]
        self.assertEqual(
            last_modified_gets[0].rsplit('.', 1)[0],
            last_modified.rsplit('.', 1)[0],
            '%r != %r' % (last_modified_gets[0], last_modified))
        # There should be *exactly* two parts in the result
        self.assertEqual(1, len(last_modified_gets))
        # List Parts
        key, upload_id = uploads[0]
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('GET', bucket, key, query=query)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        elem = fromstring(body, 'ListPartsResult')
        self.assertEqual(elem.find('Bucket').text, bucket)
        self.assertEqual(elem.find('Key').text, key)
        self.assertEqual(elem.find('UploadId').text, upload_id)
        self.assertEqual(elem.find('Initiator/ID').text, self.conn.user_id)
        self.assertEqual(elem.find('Initiator/DisplayName').text,
                         self.conn.user_id)
        self.assertEqual(elem.find('Owner/ID').text, self.conn.user_id)
        self.assertEqual(elem.find('Owner/DisplayName').text,
                         self.conn.user_id)
        self.assertEqual(elem.find('StorageClass').text, 'STANDARD')
        self.assertEqual(elem.find('PartNumberMarker').text, '0')
        self.assertEqual(elem.find('NextPartNumberMarker').text, '1')
        self.assertEqual(elem.find('MaxParts').text, '1000')
        self.assertEqual(elem.find('IsTruncated').text, 'false')
        self.assertEqual(len(elem.findall('Part')), 1)
        # etags will be used to generate xml for Complete Multipart Upload
        etags = []
        for (expected_etag, expected_date), p in \
                izip(expected_parts_list, elem.findall('Part')):
            last_modified = p.find('LastModified').text
            self.assertTrue(last_modified is not None)
            # TODO: sanity check
            #       (kota_) How do we check the sanity?
            #       the last-modified header drops milli-seconds info
            #       by the constraint of the format.
            #       For now, we can do either the format check or round check
            # last_modified_from_xml = mktime(last_modified)
            # self.assertEqual(expected_date,
            #                  last_modified_from_xml)
            self.assertEqual(expected_etag, p.find('ETag').text)
            self.assertEqual(self.min_segment_size, int(p.find('Size').text))
            etags.append(p.find('ETag').text)
        # Abort Multipart Uploads
        # note that uploads[1] has part data while uploads[2] does not
        for key, upload_id in uploads[1:]:
            query = 'uploadId=%s' % upload_id
            status, headers, body = \
                self.conn.make_request('DELETE', bucket, key, query=query)
            self.assertEqual(status, 204)
            self.assertCommonResponseHeaders(headers)
            self.assertTrue('content-type' in headers)
            self.assertEqual(headers['content-type'],
                             'text/html; charset=UTF-8')
            self.assertTrue('content-length' in headers)
            self.assertEqual(headers['content-length'], '0')
        # Complete Multipart Upload
        key, upload_id = uploads[0]
        xml = self._gen_comp_xml(etags)
        status, headers, body = \
            self._complete_multi_upload(bucket, key, upload_id, xml)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        elem = fromstring(body, 'CompleteMultipartUploadResult')
        # TODO: use tf.config value
        self.assertEqual(
            'http://%s:%s/bucket/obj1' % (self.conn.host, self.conn.port),
            elem.find('Location').text)
        self.assertEqual(elem.find('Bucket').text, bucket)
        self.assertEqual(elem.find('Key').text, key)
        # The multipart etag is md5-of-concatenated-binary-part-md5s
        # suffixed with "-<number of parts>".
        concatted_etags = ''.join(etag.strip('"') for etag in etags)
        exp_etag = '"%s-%s"' % (
            md5(concatted_etags.decode('hex')).hexdigest(), len(etags))
        etag = elem.find('ETag').text
        self.assertEqual(etag, exp_etag)
        exp_size = self.min_segment_size * len(etags)
        swift_etag = '"%s"' % md5(concatted_etags).hexdigest()
        # TODO: GET via swift api, check against swift_etag
        # Check object
        def check_obj(req_headers, exp_status):
            # HEAD the assembled object with conditional headers and
            # verify status, etag presence and content-length rules.
            status, headers, body = \
                self.conn.make_request('HEAD', bucket, key, req_headers)
            self.assertEqual(status, exp_status)
            self.assertCommonResponseHeaders(headers)
            self.assertIn('content-length', headers)
            if exp_status == 412:
                self.assertNotIn('etag', headers)
                self.assertEqual(headers['content-length'], '0')
            else:
                self.assertIn('etag', headers)
                self.assertEqual(headers['etag'], exp_etag)
                if exp_status == 304:
                    self.assertEqual(headers['content-length'], '0')
                else:
                    self.assertEqual(headers['content-length'], str(exp_size))
        check_obj({}, 200)
        # Sanity check conditionals
        check_obj({'If-Match': 'some other thing'}, 412)
        check_obj({'If-None-Match': 'some other thing'}, 200)
        # More interesting conditional cases
        check_obj({'If-Match': exp_etag}, 200)
        check_obj({'If-Match': swift_etag}, 412)
        check_obj({'If-None-Match': swift_etag}, 200)
        check_obj({'If-None-Match': exp_etag}, 304)
        # Check listings
        status, headers, body = self.conn.make_request('GET', bucket)
        self.assertEqual(status, 200)
        elem = fromstring(body, 'ListBucketResult')
        resp_objects = elem.findall('./Contents')
        self.assertEqual(len(list(resp_objects)), 1)
        for o in resp_objects:
            self.assertEqual(o.find('Key').text, key)
            self.assertIsNotNone(o.find('LastModified').text)
            self.assertRegexpMatches(
                o.find('LastModified').text,
                r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
            self.assertEqual(o.find('ETag').text, exp_etag)
            self.assertEqual(o.find('Size').text, str(exp_size))
            self.assertIsNotNone(o.find('StorageClass').text is not None)
            self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
            self.assertEqual(o.find('Owner/DisplayName').text,
                             self.conn.user_id)
    def test_initiate_multi_upload_error(self):
        """Initiate must fail with bad credentials or a missing bucket."""
        bucket = 'bucket'
        key = 'obj'
        self.conn.make_request('PUT', bucket)
        query = 'uploads'
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('POST', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        status, resp_headers, body = \
            self.conn.make_request('POST', 'nothing', key, query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
    def test_list_multi_uploads_error(self):
        """Listing uploads must fail with bad credentials or no bucket."""
        bucket = 'bucket'
        self.conn.make_request('PUT', bucket)
        query = 'uploads'
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('GET', bucket, query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        status, headers, body = \
            self.conn.make_request('GET', 'nothing', query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
    def test_upload_part_error(self):
        """Upload-part error paths: bad auth, missing bucket, bad
        upload id, and an out-of-range part number."""
        bucket = 'bucket'
        self.conn.make_request('PUT', bucket)
        query = 'uploads'
        key = 'obj'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        query = 'partNumber=%s&uploadId=%s' % (1, upload_id)
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('PUT', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        status, headers, body = \
            self.conn.make_request('PUT', 'nothing', key, query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
        query = 'partNumber=%s&uploadId=%s' % (1, 'nothing')
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'NoSuchUpload')
        # partNumber=0 is below the valid range (part numbers start at 1)
        query = 'partNumber=%s&uploadId=%s' % (0, upload_id)
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'InvalidArgument')
        err_msg = 'Part number must be an integer between 1 and'
        self.assertTrue(err_msg in get_error_msg(body))
    def test_upload_part_copy_error(self):
        """Upload-part-copy error paths: bad auth, missing destination
        bucket, bad upload id, and a missing copy source object."""
        src_bucket = 'src'
        src_obj = 'src'
        self.conn.make_request('PUT', src_bucket)
        self.conn.make_request('PUT', src_bucket, src_obj)
        src_path = '%s/%s' % (src_bucket, src_obj)
        bucket = 'bucket'
        self.conn.make_request('PUT', bucket)
        key = 'obj'
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        query = 'partNumber=%s&uploadId=%s' % (1, upload_id)
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('PUT', bucket, key,
                                         headers={
                                             'X-Amz-Copy-Source': src_path
                                         },
                                         query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        status, headers, body = \
            self.conn.make_request('PUT', 'nothing', key,
                                   headers={'X-Amz-Copy-Source': src_path},
                                   query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
        query = 'partNumber=%s&uploadId=%s' % (1, 'nothing')
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key,
                                   headers={'X-Amz-Copy-Source': src_path},
                                   query=query)
        self.assertEqual(get_error_code(body), 'NoSuchUpload')
        # Copy source object does not exist
        src_path = '%s/%s' % (src_bucket, 'nothing')
        query = 'partNumber=%s&uploadId=%s' % (1, upload_id)
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key,
                                   headers={'X-Amz-Copy-Source': src_path},
                                   query=query)
        self.assertEqual(get_error_code(body), 'NoSuchKey')
    def test_list_parts_error(self):
        """List-parts error paths: bad auth, missing bucket, bad id."""
        bucket = 'bucket'
        self.conn.make_request('PUT', bucket)
        key = 'obj'
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        query = 'uploadId=%s' % upload_id
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('GET', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        status, headers, body = \
            self.conn.make_request('GET', 'nothing', key, query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
        query = 'uploadId=%s' % 'nothing'
        status, headers, body = \
            self.conn.make_request('GET', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'NoSuchUpload')
    def test_abort_multi_upload_error(self):
        """Abort error paths: bad auth, missing bucket/key, bad id."""
        bucket = 'bucket'
        self.conn.make_request('PUT', bucket)
        key = 'obj'
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        self._upload_part(bucket, key, upload_id)
        query = 'uploadId=%s' % upload_id
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('DELETE', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        status, headers, body = \
            self.conn.make_request('DELETE', 'nothing', key, query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
        status, headers, body = \
            self.conn.make_request('DELETE', bucket, 'nothing', query=query)
        self.assertEqual(get_error_code(body), 'NoSuchUpload')
        query = 'uploadId=%s' % 'nothing'
        status, headers, body = \
            self.conn.make_request('DELETE', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'NoSuchUpload')
    def test_complete_multi_upload_error(self):
        """Complete error paths: too-small part, bad auth, missing
        bucket, bad upload id, empty/invalid XML, and a part that was
        never uploaded."""
        bucket = 'bucket'
        keys = ['obj', 'obj2']
        self.conn.make_request('PUT', bucket)
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[0], query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        etags = []
        for i in xrange(1, 3):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, keys[0], query=query)
            etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        # part 1 too small
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[0], body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'EntityTooSmall')
        # invalid credentials
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('POST', bucket, keys[0], body=xml,
                                         query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        # wrong/missing bucket
        status, headers, body = \
            self.conn.make_request('POST', 'nothing', keys[0], query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
        # wrong upload ID
        query = 'uploadId=%s' % 'nothing'
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[0], body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'NoSuchUpload')
        # without Part tag in xml
        query = 'uploadId=%s' % upload_id
        xml = self._gen_comp_xml([])
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[0], body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'MalformedXML')
        # with invalid etag in xml
        invalid_etag = 'invalid'
        xml = self._gen_comp_xml([invalid_etag])
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[0], body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'InvalidPart')
        # without part in Swift
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[1], query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        query = 'uploadId=%s' % upload_id
        xml = self._gen_comp_xml([etags[0]])
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[1], body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'InvalidPart')
    def test_complete_upload_min_segment_size(self):
        """Only non-last parts must meet min_segment_size to complete."""
        bucket = 'bucket'
        key = 'obj'
        self.conn.make_request('PUT', bucket)
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        # multi parts with no body
        etags = []
        for i in xrange(1, 3):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, key, query=query)
            etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'EntityTooSmall')
        # multi parts with all parts less than min segment size
        etags = []
        for i in xrange(1, 3):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, key, query=query,
                                       body='AA')
            etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'EntityTooSmall')
        # one part and less than min segment size
        etags = []
        query = 'partNumber=1&uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key, query=query,
                                   body='AA')
        etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(status, 200)
        # multi parts with all parts except the first part less than min
        # segment size
        # NOTE(review): the loop below uses body_size[i] for i in 1..2,
        # so body_size[0] is never used and the parts actually uploaded
        # are (min_segment_size - 1) and 2 bytes -- both below the
        # minimum, which still yields EntityTooSmall, but the intent of
        # the comment above (first part at the minimum) suggests the
        # index should be i - 1. Confirm against upstream.
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        etags = []
        body_size = [self.min_segment_size, self.min_segment_size - 1, 2]
        for i in xrange(1, 3):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, key, query=query,
                                       body='A' * body_size[i])
            etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'EntityTooSmall')
        # multi parts with all parts except last part more than min segment
        # size (parts uploaded are body_size[1] and body_size[2]: the
        # last part may legally be smaller than the minimum)
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        etags = []
        body_size = [self.min_segment_size, self.min_segment_size, 2]
        for i in xrange(1, 3):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, key, query=query,
                                       body='A' * body_size[i])
            etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(status, 200)
    def test_complete_upload_with_fewer_etags(self):
        """Completing with a subset of the uploaded parts must succeed;
        parts omitted from the manifest are simply dropped."""
        bucket = 'bucket'
        key = 'obj'
        self.conn.make_request('PUT', bucket)
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        etags = []
        for i in xrange(1, 4):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, key,
                                       body='A' * 1024 * 1024 * 5, query=query)
            etags.append(headers['etag'])
        query = 'uploadId=%s' % upload_id
        xml = self._gen_comp_xml(etags[:-1])
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(status, 200)
    def test_object_multi_upload_part_copy_range(self):
        """Upload-part-copy with an X-Amz-Copy-Source-Range header:
        only the requested byte range of the source is copied."""
        bucket = 'bucket'
        keys = ['obj1']
        uploads = []
        results_generator = self._initiate_multi_uploads_result_generator(
            bucket, keys)
        # Initiate Multipart Upload
        for expected_key, (status, headers, body) in \
                izip(keys, results_generator):
            self.assertEqual(status, 200)
            self.assertCommonResponseHeaders(headers)
            self.assertTrue('content-type' in headers)
            self.assertEqual(headers['content-type'], 'application/xml')
            self.assertTrue('content-length' in headers)
            self.assertEqual(headers['content-length'], str(len(body)))
            elem = fromstring(body, 'InitiateMultipartUploadResult')
            self.assertEqual(elem.find('Bucket').text, bucket)
            key = elem.find('Key').text
            self.assertEqual(expected_key, key)
            upload_id = elem.find('UploadId').text
            self.assertTrue(upload_id is not None)
            self.assertTrue((key, upload_id) not in uploads)
            uploads.append((key, upload_id))
        self.assertEqual(len(uploads), len(keys))  # sanity
        # Upload Part Copy Range
        key, upload_id = uploads[0]
        src_bucket = 'bucket2'
        src_obj = 'obj4'
        src_content = 'y' * (self.min_segment_size / 2) + 'z' * \
            self.min_segment_size
        src_range = 'bytes=0-%d' % (self.min_segment_size - 1)
        etag = md5(src_content[:self.min_segment_size]).hexdigest()
        # prepare src obj
        self.conn.make_request('PUT', src_bucket)
        self.conn.make_request('PUT', src_bucket, src_obj, body=src_content)
        _, headers, _ = self.conn.make_request('HEAD', src_bucket, src_obj)
        self.assertCommonResponseHeaders(headers)
        status, headers, body, resp_etag = \
            self._upload_part_copy(src_bucket, src_obj, bucket,
                                   key, upload_id, 1, src_range)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        self.assertTrue('etag' not in headers)
        elem = fromstring(body, 'CopyPartResult')
        last_modified = elem.find('LastModified').text
        self.assertTrue(last_modified is not None)
        self.assertEqual(resp_etag, etag)
        # Check last-modified timestamp
        key, upload_id = uploads[0]
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('GET', bucket, key, query=query)
        elem = fromstring(body, 'ListPartsResult')
        # FIXME: COPY result drops milli/microseconds but GET doesn't
        last_modified_gets = [p.find('LastModified').text
                              for p in elem.iterfind('Part')]
        self.assertEqual(
            last_modified_gets[0].rsplit('.', 1)[0],
            last_modified.rsplit('.', 1)[0],
            '%r != %r' % (last_modified_gets[0], last_modified))
        # There should be *exactly* one parts in the result
        self.assertEqual(1, len(last_modified_gets))
        # Abort Multipart Upload
        key, upload_id = uploads[0]
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('DELETE', bucket, key, query=query)
        # sanity checks
        self.assertEqual(status, 204)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], '0')
class TestS3ApiMultiUploadSigV4(TestS3ApiMultiUpload):
    """Re-run the multipart upload suite with SigV4 request signing."""
    @classmethod
    def setUpClass(cls):
        # Flip the test client into SigV4 mode for every test in this class.
        os.environ['S3_USE_SIGV4'] = "True"
    @classmethod
    def tearDownClass(cls):
        del os.environ['S3_USE_SIGV4']
    def setUp(self):
        super(TestS3ApiMultiUploadSigV4, self).setUp()
    def test_object_multi_upload_part_copy_range(self):
        """Run the inherited copy-range test, skipping on old boto.

        BUG FIX: the previous override returned without doing anything
        when boto >= 3.0, silently turning this test into a no-op pass.
        It now delegates to the parent implementation after the
        version check.
        """
        if StrictVersion(boto.__version__) < StrictVersion('3.0'):
            self.skipTest('This stuff got the issue of boto<=2.x')
        super(TestS3ApiMultiUploadSigV4,
              self).test_object_multi_upload_part_copy_range()
    def test_delete_bucket_multi_upload_object_exisiting(self):
        """Deleting a non-empty bucket must 409 and leave the assembled
        multipart object intact."""
        bucket = 'bucket'
        keys = ['obj1']
        uploads = []
        results_generator = self._initiate_multi_uploads_result_generator(
            bucket, keys)
        # Initiate Multipart Upload
        for expected_key, (status, _, body) in \
                izip(keys, results_generator):
            self.assertEqual(status, 200)  # sanity
            elem = fromstring(body, 'InitiateMultipartUploadResult')
            key = elem.find('Key').text
            self.assertEqual(expected_key, key)  # sanity
            upload_id = elem.find('UploadId').text
            self.assertTrue(upload_id is not None)  # sanity
            self.assertTrue((key, upload_id) not in uploads)
            uploads.append((key, upload_id))
        self.assertEqual(len(uploads), len(keys))  # sanity
        # Upload Part
        key, upload_id = uploads[0]
        content = 'a' * self.min_segment_size
        status, headers, body = \
            self._upload_part(bucket, key, upload_id, content)
        self.assertEqual(status, 200)
        # Complete Multipart Upload
        key, upload_id = uploads[0]
        etags = [md5(content).hexdigest()]
        xml = self._gen_comp_xml(etags)
        status, headers, body = \
            self._complete_multi_upload(bucket, key, upload_id, xml)
        self.assertEqual(status, 200)  # sanity
        # GET multipart object
        status, headers, body = \
            self.conn.make_request('GET', bucket, key)
        self.assertEqual(status, 200)  # sanity
        self.assertEqual(content, body)  # sanity
        # DELETE bucket while the object existing
        status, headers, body = \
            self.conn.make_request('DELETE', bucket)
        self.assertEqual(status, 409)  # sanity
        # The object must still be there.
        status, headers, body = \
            self.conn.make_request('GET', bucket, key)
        self.assertEqual(status, 200)  # sanity
        self.assertEqual(content, body)  # sanity
# Allow running this module directly (unittest2 for Python 2 compat).
if __name__ == '__main__':
    unittest2.main()
| 42.616318 | 80 | 0.595245 |
import base64
import unittest2
import os
import boto
from distutils.version import StrictVersion
from hashlib import md5
from itertools import izip, izip_longest
import test.functional as tf
from swift.common.middleware.s3api.etree import fromstring, tostring, Element, \
SubElement
from swift.common.middleware.s3api.utils import mktime
from test.functional.s3api import S3ApiBase
from test.functional.s3api.s3_test_client import Connection
from test.functional.s3api.utils import get_error_code, get_error_msg
def setUpModule():
    # Initialize the functional-test package (cluster config, clients).
    tf.setup_package()
def tearDownModule():
    # Release whatever setup_package created.
    tf.teardown_package()
class TestS3ApiMultiUpload(S3ApiBase):
    """Functional tests for the s3api multipart-upload workflow.

    Exercises initiate / upload-part / upload-part-copy / list / complete /
    abort requests against a running Swift cluster with the s3api middleware
    enabled.  Python 2 only (``izip``/``xrange``/``unittest2``).
    """
    def setUp(self):
        """Skip unless multipart uploads are enabled; cache the min part size."""
        super(TestS3ApiMultiUpload, self).setUp()
        if not tf.cluster_info['s3api'].get('allow_multipart_uploads', False):
            raise tf.SkipTest('multipart upload is not enebled')
        self.min_segment_size = int(tf.cluster_info['s3api'].get(
            'min_segment_size', 5242880))
    def _gen_comp_xml(self, etags):
        """Build a CompleteMultipartUpload XML body; part numbers start at 1."""
        elem = Element('CompleteMultipartUpload')
        for i, etag in enumerate(etags):
            elem_part = SubElement(elem, 'Part')
            SubElement(elem_part, 'PartNumber').text = str(i + 1)
            SubElement(elem_part, 'ETag').text = etag
        return tostring(elem)
    def _initiate_multi_uploads_result_generator(self, bucket, keys,
                                                 headers=None, trials=1):
        """Yield (status, headers, body) for initiating one upload per key.

        Creates *bucket* first; *headers* pairs positionally with *keys*,
        and each initiation request is repeated *trials* times.
        """
        if headers is None:
            headers = [None] * len(keys)
        self.conn.make_request('PUT', bucket)
        query = 'uploads'
        for key, key_headers in izip_longest(keys, headers):
            for i in xrange(trials):
                status, resp_headers, body = \
                    self.conn.make_request('POST', bucket, key,
                                           headers=key_headers, query=query)
                yield status, resp_headers, body
    def _upload_part(self, bucket, key, upload_id, content=None, part_num=1):
        """PUT one part; defaults to min_segment_size bytes of 'a'."""
        query = 'partNumber=%s&uploadId=%s' % (part_num, upload_id)
        content = content if content else 'a' * self.min_segment_size
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key, body=content,
                                   query=query)
        return status, headers, body
    def _upload_part_copy(self, src_bucket, src_obj, dst_bucket, dst_key,
                          upload_id, part_num=1, src_range=None):
        """PUT a part via X-Amz-Copy-Source; returns the CopyPartResult etag too."""
        src_path = '%s/%s' % (src_bucket, src_obj)
        query = 'partNumber=%s&uploadId=%s' % (part_num, upload_id)
        req_headers = {'X-Amz-Copy-Source': src_path}
        if src_range:
            req_headers['X-Amz-Copy-Source-Range'] = src_range
        status, headers, body = \
            self.conn.make_request('PUT', dst_bucket, dst_key,
                                   headers=req_headers,
                                   query=query)
        elem = fromstring(body, 'CopyPartResult')
        etag = elem.find('ETag').text.strip('"')
        return status, headers, body, etag
    def _complete_multi_upload(self, bucket, key, upload_id, xml):
        """POST the CompleteMultipartUpload XML for *upload_id*."""
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        return status, headers, body
    def test_object_multi_upload(self):
        """End-to-end happy path: initiate, list, upload, copy, complete, HEAD, list."""
        bucket = 'bucket'
        keys = ['obj1', 'obj2', 'obj3']
        headers = [None,
                   {'Content-MD5': base64.b64encode('a' * 16).strip()},
                   {'Etag': 'nonsense'}]
        uploads = []
        results_generator = self._initiate_multi_uploads_result_generator(
            bucket, keys, headers=headers)
        # Initiate Multipart Upload
        for expected_key, (status, headers, body) in \
                izip(keys, results_generator):
            self.assertEqual(status, 200)
            self.assertCommonResponseHeaders(headers)
            self.assertTrue('content-type' in headers)
            self.assertEqual(headers['content-type'], 'application/xml')
            self.assertTrue('content-length' in headers)
            self.assertEqual(headers['content-length'], str(len(body)))
            elem = fromstring(body, 'InitiateMultipartUploadResult')
            self.assertEqual(elem.find('Bucket').text, bucket)
            key = elem.find('Key').text
            self.assertEqual(expected_key, key)
            upload_id = elem.find('UploadId').text
            self.assertTrue(upload_id is not None)
            self.assertTrue((key, upload_id) not in uploads)
            uploads.append((key, upload_id))
        self.assertEqual(len(uploads), len(keys))  # sanity
        # List Multipart Uploads
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('GET', bucket, query=query)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        elem = fromstring(body, 'ListMultipartUploadsResult')
        self.assertEqual(elem.find('Bucket').text, bucket)
        self.assertIsNone(elem.find('KeyMarker').text)
        self.assertEqual(elem.find('NextKeyMarker').text, uploads[-1][0])
        self.assertIsNone(elem.find('UploadIdMarker').text)
        self.assertEqual(elem.find('NextUploadIdMarker').text, uploads[-1][1])
        self.assertEqual(elem.find('MaxUploads').text, '1000')
        self.assertTrue(elem.find('EncodingType') is None)
        self.assertEqual(elem.find('IsTruncated').text, 'false')
        self.assertEqual(len(elem.findall('Upload')), 3)
        for (expected_key, expected_upload_id), u in \
                izip(uploads, elem.findall('Upload')):
            key = u.find('Key').text
            upload_id = u.find('UploadId').text
            self.assertEqual(expected_key, key)
            self.assertEqual(expected_upload_id, upload_id)
            self.assertEqual(u.find('Initiator/ID').text,
                             self.conn.user_id)
            self.assertEqual(u.find('Initiator/DisplayName').text,
                             self.conn.user_id)
            self.assertEqual(u.find('Owner/ID').text, self.conn.user_id)
            self.assertEqual(u.find('Owner/DisplayName').text,
                             self.conn.user_id)
            self.assertEqual(u.find('StorageClass').text, 'STANDARD')
            self.assertTrue(u.find('Initiated').text is not None)
        # Upload Part
        key, upload_id = uploads[0]
        content = 'a' * self.min_segment_size
        etag = md5(content).hexdigest()
        status, headers, body = \
            self._upload_part(bucket, key, upload_id, content)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers, etag)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], '0')
        expected_parts_list = [(headers['etag'], mktime(headers['date']))]
        # Upload Part Copy
        key, upload_id = uploads[1]
        src_bucket = 'bucket2'
        src_obj = 'obj3'
        src_content = 'b' * self.min_segment_size
        etag = md5(src_content).hexdigest()
        # prepare src obj
        self.conn.make_request('PUT', src_bucket)
        self.conn.make_request('PUT', src_bucket, src_obj, body=src_content)
        _, headers, _ = self.conn.make_request('HEAD', src_bucket, src_obj)
        self.assertCommonResponseHeaders(headers)
        status, headers, body, resp_etag = \
            self._upload_part_copy(src_bucket, src_obj, bucket,
                                   key, upload_id)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        self.assertTrue('etag' not in headers)
        elem = fromstring(body, 'CopyPartResult')
        last_modified = elem.find('LastModified').text
        self.assertTrue(last_modified is not None)
        self.assertEqual(resp_etag, etag)
        # Check last-modified timestamp
        key, upload_id = uploads[1]
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('GET', bucket, key, query=query)
        self.assertEqual(200, status)
        elem = fromstring(body, 'ListPartsResult')
        # FIXME: COPY result drops milli/microseconds but GET doesn't
        last_modified_gets = [p.find('LastModified').text
                              for p in elem.iterfind('Part')]
        self.assertEqual(
            last_modified_gets[0].rsplit('.', 1)[0],
            last_modified.rsplit('.', 1)[0],
            '%r != %r' % (last_modified_gets[0], last_modified))
        # There should be *exactly* one part in the result
        self.assertEqual(1, len(last_modified_gets))
        # List Parts
        key, upload_id = uploads[0]
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('GET', bucket, key, query=query)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        elem = fromstring(body, 'ListPartsResult')
        self.assertEqual(elem.find('Bucket').text, bucket)
        self.assertEqual(elem.find('Key').text, key)
        self.assertEqual(elem.find('UploadId').text, upload_id)
        self.assertEqual(elem.find('Initiator/ID').text, self.conn.user_id)
        self.assertEqual(elem.find('Initiator/DisplayName').text,
                         self.conn.user_id)
        self.assertEqual(elem.find('Owner/ID').text, self.conn.user_id)
        self.assertEqual(elem.find('Owner/DisplayName').text,
                         self.conn.user_id)
        self.assertEqual(elem.find('StorageClass').text, 'STANDARD')
        self.assertEqual(elem.find('PartNumberMarker').text, '0')
        self.assertEqual(elem.find('NextPartNumberMarker').text, '1')
        self.assertEqual(elem.find('MaxParts').text, '1000')
        self.assertEqual(elem.find('IsTruncated').text, 'false')
        self.assertEqual(len(elem.findall('Part')), 1)
        # etags will be used to generate xml for Complete Multipart Upload
        etags = []
        for (expected_etag, expected_date), p in \
                izip(expected_parts_list, elem.findall('Part')):
            last_modified = p.find('LastModified').text
            self.assertTrue(last_modified is not None)
            # TODO: sanity check
            # (kota_) How do we check the sanity?
            # the last-modified header drops milli-seconds info
            # by the constraint of the format.
            # For now, we can do either the format check or round check
            # last_modified_from_xml = mktime(last_modified)
            # self.assertEqual(expected_date,
            #                  last_modified_from_xml)
            self.assertEqual(expected_etag, p.find('ETag').text)
            self.assertEqual(self.min_segment_size, int(p.find('Size').text))
            etags.append(p.find('ETag').text)
        # Abort Multipart Uploads
        # note that uploads[1] has part data while uploads[2] does not
        for key, upload_id in uploads[1:]:
            query = 'uploadId=%s' % upload_id
            status, headers, body = \
                self.conn.make_request('DELETE', bucket, key, query=query)
            self.assertEqual(status, 204)
            self.assertCommonResponseHeaders(headers)
            self.assertTrue('content-type' in headers)
            self.assertEqual(headers['content-type'],
                             'text/html; charset=UTF-8')
            self.assertTrue('content-length' in headers)
            self.assertEqual(headers['content-length'], '0')
        # Complete Multipart Upload
        key, upload_id = uploads[0]
        xml = self._gen_comp_xml(etags)
        status, headers, body = \
            self._complete_multi_upload(bucket, key, upload_id, xml)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        elem = fromstring(body, 'CompleteMultipartUploadResult')
        # TODO: use tf.config value
        self.assertEqual(
            'http://%s:%s/bucket/obj1' % (self.conn.host, self.conn.port),
            elem.find('Location').text)
        self.assertEqual(elem.find('Bucket').text, bucket)
        self.assertEqual(elem.find('Key').text, key)
        concatted_etags = ''.join(etag.strip('"') for etag in etags)
        exp_etag = '"%s-%s"' % (
            md5(concatted_etags.decode('hex')).hexdigest(), len(etags))
        etag = elem.find('ETag').text
        self.assertEqual(etag, exp_etag)
        exp_size = self.min_segment_size * len(etags)
        swift_etag = '"%s"' % md5(concatted_etags).hexdigest()
        # TODO: GET via swift api, check against swift_etag
        # Check object
        def check_obj(req_headers, exp_status):
            status, headers, body = \
                self.conn.make_request('HEAD', bucket, key, req_headers)
            self.assertEqual(status, exp_status)
            self.assertCommonResponseHeaders(headers)
            self.assertIn('content-length', headers)
            if exp_status == 412:
                self.assertNotIn('etag', headers)
                self.assertEqual(headers['content-length'], '0')
            else:
                self.assertIn('etag', headers)
                self.assertEqual(headers['etag'], exp_etag)
                if exp_status == 304:
                    self.assertEqual(headers['content-length'], '0')
                else:
                    self.assertEqual(headers['content-length'], str(exp_size))
        check_obj({}, 200)
        # Sanity check conditionals
        check_obj({'If-Match': 'some other thing'}, 412)
        check_obj({'If-None-Match': 'some other thing'}, 200)
        # More interesting conditional cases
        check_obj({'If-Match': exp_etag}, 200)
        check_obj({'If-Match': swift_etag}, 412)
        check_obj({'If-None-Match': swift_etag}, 200)
        check_obj({'If-None-Match': exp_etag}, 304)
        # Check listings
        status, headers, body = self.conn.make_request('GET', bucket)
        self.assertEqual(status, 200)
        elem = fromstring(body, 'ListBucketResult')
        resp_objects = elem.findall('./Contents')
        self.assertEqual(len(list(resp_objects)), 1)
        for o in resp_objects:
            self.assertEqual(o.find('Key').text, key)
            self.assertIsNotNone(o.find('LastModified').text)
            self.assertRegexpMatches(
                o.find('LastModified').text,
                r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{3}Z$')
            self.assertEqual(o.find('ETag').text, exp_etag)
            self.assertEqual(o.find('Size').text, str(exp_size))
            self.assertIsNotNone(o.find('StorageClass').text is not None)
            self.assertEqual(o.find('Owner/ID').text, self.conn.user_id)
            self.assertEqual(o.find('Owner/DisplayName').text,
                             self.conn.user_id)
    def test_initiate_multi_upload_error(self):
        """Initiate must fail with SignatureDoesNotMatch / NoSuchBucket."""
        bucket = 'bucket'
        key = 'obj'
        self.conn.make_request('PUT', bucket)
        query = 'uploads'
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('POST', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        status, resp_headers, body = \
            self.conn.make_request('POST', 'nothing', key, query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
    def test_list_multi_uploads_error(self):
        """Listing uploads must fail with SignatureDoesNotMatch / NoSuchBucket."""
        bucket = 'bucket'
        self.conn.make_request('PUT', bucket)
        query = 'uploads'
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('GET', bucket, query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        status, headers, body = \
            self.conn.make_request('GET', 'nothing', query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
    def test_upload_part_error(self):
        """Upload-part error paths: bad auth, bad bucket, bad upload id, part 0."""
        bucket = 'bucket'
        self.conn.make_request('PUT', bucket)
        query = 'uploads'
        key = 'obj'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        query = 'partNumber=%s&uploadId=%s' % (1, upload_id)
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('PUT', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        status, headers, body = \
            self.conn.make_request('PUT', 'nothing', key, query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
        query = 'partNumber=%s&uploadId=%s' % (1, 'nothing')
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'NoSuchUpload')
        query = 'partNumber=%s&uploadId=%s' % (0, upload_id)
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'InvalidArgument')
        err_msg = 'Part number must be an integer between 1 and'
        self.assertTrue(err_msg in get_error_msg(body))
    def test_upload_part_copy_error(self):
        """Upload-part-copy error paths: bad auth/bucket/upload id/source key."""
        src_bucket = 'src'
        src_obj = 'src'
        self.conn.make_request('PUT', src_bucket)
        self.conn.make_request('PUT', src_bucket, src_obj)
        src_path = '%s/%s' % (src_bucket, src_obj)
        bucket = 'bucket'
        self.conn.make_request('PUT', bucket)
        key = 'obj'
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        query = 'partNumber=%s&uploadId=%s' % (1, upload_id)
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('PUT', bucket, key,
                                         headers={
                                             'X-Amz-Copy-Source': src_path
                                         },
                                         query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        status, headers, body = \
            self.conn.make_request('PUT', 'nothing', key,
                                   headers={'X-Amz-Copy-Source': src_path},
                                   query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
        query = 'partNumber=%s&uploadId=%s' % (1, 'nothing')
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key,
                                   headers={'X-Amz-Copy-Source': src_path},
                                   query=query)
        self.assertEqual(get_error_code(body), 'NoSuchUpload')
        src_path = '%s/%s' % (src_bucket, 'nothing')
        query = 'partNumber=%s&uploadId=%s' % (1, upload_id)
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key,
                                   headers={'X-Amz-Copy-Source': src_path},
                                   query=query)
        self.assertEqual(get_error_code(body), 'NoSuchKey')
    def test_list_parts_error(self):
        """List-parts error paths: bad auth, bad bucket, bad upload id."""
        bucket = 'bucket'
        self.conn.make_request('PUT', bucket)
        key = 'obj'
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        query = 'uploadId=%s' % upload_id
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('GET', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        status, headers, body = \
            self.conn.make_request('GET', 'nothing', key, query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
        query = 'uploadId=%s' % 'nothing'
        status, headers, body = \
            self.conn.make_request('GET', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'NoSuchUpload')
    def test_abort_multi_upload_error(self):
        """Abort error paths: bad auth, bad bucket, bad key, bad upload id."""
        bucket = 'bucket'
        self.conn.make_request('PUT', bucket)
        key = 'obj'
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        self._upload_part(bucket, key, upload_id)
        query = 'uploadId=%s' % upload_id
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('DELETE', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        status, headers, body = \
            self.conn.make_request('DELETE', 'nothing', key, query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
        status, headers, body = \
            self.conn.make_request('DELETE', bucket, 'nothing', query=query)
        self.assertEqual(get_error_code(body), 'NoSuchUpload')
        query = 'uploadId=%s' % 'nothing'
        status, headers, body = \
            self.conn.make_request('DELETE', bucket, key, query=query)
        self.assertEqual(get_error_code(body), 'NoSuchUpload')
    def test_complete_multi_upload_error(self):
        """Complete error paths: small parts, bad auth/bucket/id, bad XML, bad etag."""
        bucket = 'bucket'
        keys = ['obj', 'obj2']
        self.conn.make_request('PUT', bucket)
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[0], query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        etags = []
        for i in xrange(1, 3):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, keys[0], query=query)
            etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        # part 1 too small
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[0], body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'EntityTooSmall')
        # invalid credentials
        auth_error_conn = Connection(aws_secret_key='invalid')
        status, headers, body = \
            auth_error_conn.make_request('POST', bucket, keys[0], body=xml,
                                         query=query)
        self.assertEqual(get_error_code(body), 'SignatureDoesNotMatch')
        # wrong/missing bucket
        status, headers, body = \
            self.conn.make_request('POST', 'nothing', keys[0], query=query)
        self.assertEqual(get_error_code(body), 'NoSuchBucket')
        # wrong upload ID
        query = 'uploadId=%s' % 'nothing'
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[0], body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'NoSuchUpload')
        # without Part tag in xml
        query = 'uploadId=%s' % upload_id
        xml = self._gen_comp_xml([])
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[0], body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'MalformedXML')
        # with invalid etag in xml
        invalid_etag = 'invalid'
        xml = self._gen_comp_xml([invalid_etag])
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[0], body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'InvalidPart')
        # without part in Swift
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[1], query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        query = 'uploadId=%s' % upload_id
        xml = self._gen_comp_xml([etags[0]])
        status, headers, body = \
            self.conn.make_request('POST', bucket, keys[1], body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'InvalidPart')
    def test_complete_upload_min_segment_size(self):
        """Only the final part may be smaller than min_segment_size."""
        bucket = 'bucket'
        key = 'obj'
        self.conn.make_request('PUT', bucket)
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        # multi parts with no body
        etags = []
        for i in xrange(1, 3):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, key, query=query)
            etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'EntityTooSmall')
        # multi parts with all parts less than min segment size
        etags = []
        for i in xrange(1, 3):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, key, query=query,
                                       body='AA')
            etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'EntityTooSmall')
        # one part and less than min segment size
        etags = []
        query = 'partNumber=1&uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('PUT', bucket, key, query=query,
                                   body='AA')
        etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(status, 200)
        # multi parts with all parts except the first part less than min
        # segment size
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        etags = []
        body_size = [self.min_segment_size, self.min_segment_size - 1, 2]
        for i in xrange(1, 3):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, key, query=query,
                                       body='A' * body_size[i])
            etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(get_error_code(body), 'EntityTooSmall')
        # multi parts with all parts except last part more than min segment
        # size
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        etags = []
        body_size = [self.min_segment_size, self.min_segment_size, 2]
        for i in xrange(1, 3):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, key, query=query,
                                       body='A' * body_size[i])
            etags.append(headers['etag'])
        xml = self._gen_comp_xml(etags)
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(status, 200)
    def test_complete_upload_with_fewer_etags(self):
        """Completing with a subset of the uploaded parts is accepted."""
        bucket = 'bucket'
        key = 'obj'
        self.conn.make_request('PUT', bucket)
        query = 'uploads'
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, query=query)
        elem = fromstring(body, 'InitiateMultipartUploadResult')
        upload_id = elem.find('UploadId').text
        etags = []
        for i in xrange(1, 4):
            query = 'partNumber=%s&uploadId=%s' % (i, upload_id)
            status, headers, body = \
                self.conn.make_request('PUT', bucket, key,
                                       body='A' * 1024 * 1024 * 5, query=query)
            etags.append(headers['etag'])
        query = 'uploadId=%s' % upload_id
        xml = self._gen_comp_xml(etags[:-1])
        status, headers, body = \
            self.conn.make_request('POST', bucket, key, body=xml,
                                   query=query)
        self.assertEqual(status, 200)
    def test_object_multi_upload_part_copy_range(self):
        """Upload-part-copy honouring an X-Amz-Copy-Source-Range, then abort."""
        bucket = 'bucket'
        keys = ['obj1']
        uploads = []
        results_generator = self._initiate_multi_uploads_result_generator(
            bucket, keys)
        # Initiate Multipart Upload
        for expected_key, (status, headers, body) in \
                izip(keys, results_generator):
            self.assertEqual(status, 200)
            self.assertCommonResponseHeaders(headers)
            self.assertTrue('content-type' in headers)
            self.assertEqual(headers['content-type'], 'application/xml')
            self.assertTrue('content-length' in headers)
            self.assertEqual(headers['content-length'], str(len(body)))
            elem = fromstring(body, 'InitiateMultipartUploadResult')
            self.assertEqual(elem.find('Bucket').text, bucket)
            key = elem.find('Key').text
            self.assertEqual(expected_key, key)
            upload_id = elem.find('UploadId').text
            self.assertTrue(upload_id is not None)
            self.assertTrue((key, upload_id) not in uploads)
            uploads.append((key, upload_id))
        self.assertEqual(len(uploads), len(keys))  # sanity
        # Upload Part Copy Range
        key, upload_id = uploads[0]
        src_bucket = 'bucket2'
        src_obj = 'obj4'
        src_content = 'y' * (self.min_segment_size / 2) + 'z' * \
            self.min_segment_size
        src_range = 'bytes=0-%d' % (self.min_segment_size - 1)
        etag = md5(src_content[:self.min_segment_size]).hexdigest()
        # prepare src obj
        self.conn.make_request('PUT', src_bucket)
        self.conn.make_request('PUT', src_bucket, src_obj, body=src_content)
        _, headers, _ = self.conn.make_request('HEAD', src_bucket, src_obj)
        self.assertCommonResponseHeaders(headers)
        status, headers, body, resp_etag = \
            self._upload_part_copy(src_bucket, src_obj, bucket,
                                   key, upload_id, 1, src_range)
        self.assertEqual(status, 200)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'application/xml')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], str(len(body)))
        self.assertTrue('etag' not in headers)
        elem = fromstring(body, 'CopyPartResult')
        last_modified = elem.find('LastModified').text
        self.assertTrue(last_modified is not None)
        self.assertEqual(resp_etag, etag)
        # Check last-modified timestamp
        key, upload_id = uploads[0]
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('GET', bucket, key, query=query)
        elem = fromstring(body, 'ListPartsResult')
        # FIXME: COPY result drops milli/microseconds but GET doesn't
        last_modified_gets = [p.find('LastModified').text
                              for p in elem.iterfind('Part')]
        self.assertEqual(
            last_modified_gets[0].rsplit('.', 1)[0],
            last_modified.rsplit('.', 1)[0],
            '%r != %r' % (last_modified_gets[0], last_modified))
        self.assertEqual(1, len(last_modified_gets))
        key, upload_id = uploads[0]
        query = 'uploadId=%s' % upload_id
        status, headers, body = \
            self.conn.make_request('DELETE', bucket, key, query=query)
        self.assertEqual(status, 204)
        self.assertCommonResponseHeaders(headers)
        self.assertTrue('content-type' in headers)
        self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
        self.assertTrue('content-length' in headers)
        self.assertEqual(headers['content-length'], '0')
class TestS3ApiMultiUploadSigV4(TestS3ApiMultiUpload):
    """Re-run the multipart-upload suite using AWS Signature Version 4."""
    @classmethod
    def setUpClass(cls):
        # Force SigV4 signing for every connection created by these tests.
        os.environ['S3_USE_SIGV4'] = "True"
    @classmethod
    def tearDownClass(cls):
        del os.environ['S3_USE_SIGV4']
    def setUp(self):
        super(TestS3ApiMultiUploadSigV4, self).setUp()
    def test_object_multi_upload_part_copy_range(self):
        """Override of the range-copy test for SigV4.

        NOTE(review): on boto >= 3.0 this override returns without calling
        the parent implementation, so the test silently passes instead of
        exercising the range copy - confirm whether that is intentional.
        """
        if StrictVersion(boto.__version__) < StrictVersion('3.0'):
            self.skipTest('This stuff got the issue of boto<=2.x')
    def test_delete_bucket_multi_upload_object_exisiting(self):
        """DELETE on a bucket holding a completed MPU object must return 409."""
        bucket = 'bucket'
        keys = ['obj1']
        uploads = []
        results_generator = self._initiate_multi_uploads_result_generator(
            bucket, keys)
        # Initiate Multipart Upload
        for expected_key, (status, _, body) in \
                izip(keys, results_generator):
            self.assertEqual(status, 200)  # sanity
            elem = fromstring(body, 'InitiateMultipartUploadResult')
            key = elem.find('Key').text
            self.assertEqual(expected_key, key)  # sanity
            upload_id = elem.find('UploadId').text
            self.assertTrue(upload_id is not None)  # sanity
            self.assertTrue((key, upload_id) not in uploads)
            uploads.append((key, upload_id))
        self.assertEqual(len(uploads), len(keys))  # sanity
        # Upload Part
        key, upload_id = uploads[0]
        content = 'a' * self.min_segment_size
        status, headers, body = \
            self._upload_part(bucket, key, upload_id, content)
        self.assertEqual(status, 200)
        # Complete Multipart Upload
        key, upload_id = uploads[0]
        etags = [md5(content).hexdigest()]
        xml = self._gen_comp_xml(etags)
        status, headers, body = \
            self._complete_multi_upload(bucket, key, upload_id, xml)
        self.assertEqual(status, 200)  # sanity
        # GET multipart object
        status, headers, body = \
            self.conn.make_request('GET', bucket, key)
        self.assertEqual(status, 200)  # sanity
        self.assertEqual(content, body)  # sanity
        # DELETE bucket while the object existing
        status, headers, body = \
            self.conn.make_request('DELETE', bucket)
        self.assertEqual(status, 409)  # sanity
        # The object must still be there.
        status, headers, body = \
            self.conn.make_request('GET', bucket, key)
        self.assertEqual(status, 200)  # sanity
        self.assertEqual(content, body)  # sanity
# Allow running this functional-test module directly from the command line.
if __name__ == '__main__':
    unittest2.main()
| true | true |
1c3ceac72c1a9d599dafe368a8354da196e7f1b9 | 395 | py | Python | problems/217.Contains_Duplicate/AC_set_nlogn.py | subramp-prep/leetcode | d125201d9021ab9b1eea5e5393c2db4edd84e740 | [
"Unlicense"
] | null | null | null | problems/217.Contains_Duplicate/AC_set_nlogn.py | subramp-prep/leetcode | d125201d9021ab9b1eea5e5393c2db4edd84e740 | [
"Unlicense"
] | null | null | null | problems/217.Contains_Duplicate/AC_set_nlogn.py | subramp-prep/leetcode | d125201d9021ab9b1eea5e5393c2db4edd84e740 | [
"Unlicense"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: illuz <iilluzen[at]gmail.com>
# File: AC_set_nlogn.py
# Create Date: 2015-07-13 21:46:54
# Usage: AC_set_nlogn.py
# Description: check whether a list contains any duplicate values.
class Solution:
    """LeetCode 217 - Contains Duplicate."""
    def containDuplicate(self, nums):
        """Return True when any value occurs more than once in ``nums``."""
        seen = set()
        for value in nums:
            # A value already recorded means we found a repeat.
            if value in seen:
                return True
            seen.add(value)
        return False
# Quick manual check: expect False (all distinct) then True (2 repeats).
solver = Solution()
for sample in ([1, 2, 3, 4], [2, 2, 3, 4]):
    print(solver.containDuplicate(sample))
| 24.6875 | 44 | 0.622785 |
class Solution:
    """LeetCode 217 - Contains Duplicate."""
    def containDuplicate(self, nums):
        """Return True if any value appears at least twice in nums."""
        # set(nums) drops duplicates, so a shorter set implies a repeat.
        return len(set(nums)) != len(nums)
s = Solution()
# Expected output: False (all distinct), then True (2 appears twice).
print(s.containDuplicate([1, 2, 3, 4]))
print(s.containDuplicate([2, 2, 3, 4]))
| true | true |
1c3ceb46051246691919dd2dac71b76ebef6eaf5 | 4,277 | py | Python | themes/migrations/0001_initial.py | ilblackdragon/django-themes | 38ae4660cc7308dec99914f7a097079064cca9bb | [
"MIT"
] | 19 | 2015-01-21T11:42:30.000Z | 2021-04-07T13:32:54.000Z | themes/migrations/0001_initial.py | ilblackdragon/django-themes | 38ae4660cc7308dec99914f7a097079064cca9bb | [
"MIT"
] | 2 | 2016-03-20T22:24:25.000Z | 2018-02-10T21:27:04.000Z | themes/migrations/0001_initial.py | ilblackdragon/django-themes | 38ae4660cc7308dec99914f7a097079064cca9bb | [
"MIT"
] | 11 | 2015-03-02T10:17:20.000Z | 2021-04-07T13:32:59.000Z | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: create the ``themes_theme`` table.

    Each row links one user (unique FK to ``auth.User``) to the integer
    id of the theme that user selected; ``0`` is the default theme.
    """

    def forwards(self, orm):
        """Apply the migration."""
        # Adding model 'Theme'
        db.create_table('themes_theme', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            # unique=True: at most one Theme row per user
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='theme', unique=True, to=orm['auth.User'])),
            ('theme', self.gf('django.db.models.fields.IntegerField')(default=0)),
        ))
        db.send_create_signal('themes', ['Theme'])

    def backwards(self, orm):
        """Revert the migration."""
        # Deleting model 'Theme'
        db.delete_table('themes_theme')

    # Frozen ORM snapshot South uses while running this migration: the
    # app's models plus their auth/contenttypes dependencies as they
    # existed when the migration was generated. Do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'themes.theme': {
            'Meta': {'object_name': 'Theme'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'theme': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'theme'", 'unique': 'True', 'to': "orm['auth.User']"})
        }
    }
complete_apps = ['themes'] | 61.1 | 182 | 0.567921 |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.create_table('themes_theme', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='theme', unique=True, to=orm['auth.User'])),
('theme', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('themes', ['Theme'])
def backwards(self, orm):
db.delete_table('themes_theme')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'themes.theme': {
'Meta': {'object_name': 'Theme'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'theme': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'theme'", 'unique': 'True', 'to': "orm['auth.User']"})
}
}
complete_apps = ['themes'] | true | true |
1c3cec5f21ed99b265c12e7a253adf6b89e40d88 | 8,166 | py | Python | test.py | Neufund/subkey-authentication-server | 37094695f6dcb3289d6311db750168aaf42045f6 | [
"MIT"
] | null | null | null | test.py | Neufund/subkey-authentication-server | 37094695f6dcb3289d6311db750168aaf42045f6 | [
"MIT"
] | null | null | null | test.py | Neufund/subkey-authentication-server | 37094695f6dcb3289d6311db750168aaf42045f6 | [
"MIT"
] | null | null | null | import hashlib
import json
import unittest
from datetime import datetime, timedelta
import jwt
from multimerchant.wallet import Wallet
from multimerchant.wallet.keys import PublicKey
import db
from config import LEDGER_BASE_PATH
from server import app
from utils import pub_to_addr, wallet_to_addr
TEST_DB = "test.json"
class UtilsTestCase(unittest.TestCase):
    """Unit tests for the helpers in ``utils``."""

    def testPubToAddr(self):
        """A known uncompressed public key maps to the expected 0x-address."""
        # 65-byte uncompressed secp256k1 key (04 || X || Y), hex-encoded.
        pub_key = "04d3c41fb2f0e07d71f10416717e450bceb635d54d9b07dea0327f90bfa82f0da" \
                  "08b40aafd480811d4aba8c17fa768765c6a897009e000f9249c299724fd567414"
        address = "0x670884349dd0e57bd1bb71bb6913e921846ba149"
        self.assertEqual(pub_to_addr(pub_key), address)
class LedgerJWTServerTestsBase(unittest.TestCase):
    """Shared helpers for the challenge/response JWT authentication tests."""

    def setUp(self):
        # Use a throw-away database file so tests never touch real data.
        self.app = app.test_client()
        app.config["DB_NAME"] = TEST_DB

    def _request_challenge(self, base_address_hash):
        """POST /challenge and return the signed challenge token as text."""
        return self.app.post(
            '/challenge',
            data=json.dumps({"base_address_hash": base_address_hash}),
            content_type='application/json'
        ).data.decode("utf-8")

    def _solve_challenge(self, token, response_address):
        """POST the derived address to /response, authorized by *token*."""
        return self.app.post(
            '/response',
            data=json.dumps({"address": response_address}),
            headers={"Authorization": "JWT {}".format(token)},
            content_type='application/json'
        ).data.decode("utf-8")

    def _get_user_data(self, token):
        """GET /data with the given JWT auth token."""
        return self.app.get(
            '/data',
            headers={"Authorization": "JWT {}".format(token)},
            content_type='application/json'
        ).data.decode("utf-8")

    @staticmethod
    def _get_data_unsafe(signed):
        # Decode the JWT payload WITHOUT verifying signature or audience.
        # Only acceptable in tests, where we just need to read the claims.
        return jwt.decode(signed, options={
            'verify_signature': False,
            'verify_aud': False
        })

    def _login(self, base_address_hash, x_wallet=None, wallet=None):
        """Run the full challenge/response flow and return the auth token.

        Pass either *wallet* (root wallet; the challenge path is used
        as-is) or *x_wallet* (a wallet already derived three levels below
        LEDGER_BASE_PATH, so those leading path segments are stripped
        before deriving the child key).
        """
        signed_challenge = self._request_challenge(base_address_hash)
        challenge = self._get_data_unsafe(signed_challenge)
        path = challenge["path"]
        if wallet:
            child = wallet.get_child_for_path(path)
        else:
            # Strip "LEDGER_BASE_PATH/" plus the first three segments.
            x_y_path = path[len(LEDGER_BASE_PATH) + 1:]
            y_path = "/".join(x_y_path.split('/')[3:])
            child = x_wallet.get_child_for_path(y_path)
        address = wallet_to_addr(child)
        return self._solve_challenge(signed_challenge, address)

    @staticmethod
    def _timestamp(time):
        # POSIX seconds for a datetime. NOTE(review): "%s" is a
        # platform-dependent strftime extension -- fine for these tests.
        return int(time.strftime("%s"))
class ChallengeResponseTests(LedgerJWTServerTestsBase):
    """End-to-end tests of the challenge/response login endpoints."""

    def setUp(self):
        super(ChallengeResponseTests, self).setUp()
        # Fixture account: its extended public key lives in the test DB.
        self.base_address_hash = "01b0021097fc768ec42c1828be5131e18b479ab210224122e467f144018396df"
        test_data = db.get(self.base_address_hash)
        self.x_wallet = Wallet(chain_code=test_data["chainCode"],
                               public_key=PublicKey.from_hex_key(test_data["pubKey"]))

    def testChallenge(self):
        """The challenge token carries the expected header and claims."""
        signed_challenge = self._request_challenge(self.base_address_hash)
        header = jwt.get_unverified_header(signed_challenge)
        challenge = self._get_data_unsafe(signed_challenge)
        self.assertEqual(header["alg"], "HS512")
        self.assertEqual(challenge["aud"], "Challenge")
        self.assertEqual(challenge["iss"], "Neufund")
        self.assertEqual(challenge["base_address_hash"], self.base_address_hash)
        self.assertIn("path", challenge)

    def testChallengeTimeout(self):
        """The challenge expires roughly 60 seconds after it is issued."""
        signed_challenge = self._request_challenge(self.base_address_hash)
        challenge = self._get_data_unsafe(signed_challenge)
        # Actual timeout is 60 seconds
        now_plus_55_sec = self._timestamp(datetime.now() + timedelta(seconds=55))
        now_plus_65_sec = self._timestamp(datetime.now() + timedelta(seconds=65))
        self.assertIn(challenge["exp"], range(now_plus_55_sec, now_plus_65_sec))

    def testChallengeResponse(self):
        """A correct response yields an ES512 token for audience MS2."""
        signed_token = self._login(self.base_address_hash, x_wallet=self.x_wallet)
        header = jwt.get_unverified_header(signed_token)
        token = self._get_data_unsafe(signed_token)
        self.assertEqual(header["alg"], "ES512")
        self.assertEqual(token['aud'], "MS2")

    def testTokenTimeout(self):
        """The auth token expires roughly 30 minutes after login."""
        signed_token = self._login(self.base_address_hash, x_wallet=self.x_wallet)
        token = self._get_data_unsafe(signed_token)
        # Actual timeout is 30 minutes
        now_plus_25_min = self._timestamp(datetime.now() + timedelta(minutes=25))
        now_plus_35_min = self._timestamp(datetime.now() + timedelta(minutes=35))
        self.assertIn(token['exp'], range(now_plus_25_min, now_plus_35_min))
class StateModifyingTestCaseMixin():
    """Snapshot the DB in setUp and restore it in tearDown.

    List this mixin *before* the TestCase base so that the cooperative
    ``super()`` calls reach the real setUp/tearDown along the MRO.
    """

    def setUp(self):
        super(StateModifyingTestCaseMixin, self).setUp()
        self.initial_db_state = db.read()

    def tearDown(self):
        super(StateModifyingTestCaseMixin, self).tearDown()
        db.write(self.initial_db_state)
class AdminTests(StateModifyingTestCaseMixin, LedgerJWTServerTestsBase):
    """Tests of the admin-only registration endpoints.

    Each test logs in as the admin fixture account, then registers a
    freshly generated wallet; the DB is restored by the mixin afterwards.
    """

    def setUp(self):
        super(AdminTests, self).setUp()
        # Admin fixture account (extended public key stored in the test DB).
        admin_base_address_hash = "01b0021097fc768ec42c1828be5131e18b479ab210224122e467f144018396df"
        test_data = db.get(admin_base_address_hash)
        admin_x_wallet = Wallet(chain_code=test_data["chainCode"],
                                public_key=PublicKey.from_hex_key(test_data["pubKey"]))
        self.token = self._login(admin_base_address_hash, x_wallet=admin_x_wallet)
        # A brand-new wallet to register during the tests.
        self.new_wallet = Wallet.new_random_wallet()
        base_address = wallet_to_addr(self.new_wallet.get_child_for_path(LEDGER_BASE_PATH))
        self.base_address_hash = hashlib.sha3_256(base_address.encode("utf-8")).hexdigest()

    def _start_registration(self, token, base_address_hash):
        """POST /start_registration (admin *token*) and return the registration token."""
        return self.app.post(
            '/start_registration',
            data=json.dumps({"base_address_hash": base_address_hash}),
            headers={"Authorization": "JWT {}".format(token)},
            content_type='application/json'
        ).data.decode("utf-8")

    def _register(self, registration_token, pub_key, chain_code):
        """POST the derived extended public key to /register."""
        return self.app.post(
            '/register',
            data=json.dumps({"x_pub_key": pub_key,
                             "x_chain_code": chain_code}),
            headers={"Authorization": "JWT {}".format(registration_token)},
            content_type='application/json'
        ).data.decode("utf-8")

    def _register_new_wallet(self, base_address_hash, wallet):
        """Run the full registration flow for *wallet* and return the server reply."""
        registration_token = self._start_registration(self.token, base_address_hash)
        path = self._get_data_unsafe(registration_token)["path"]
        child_wallet = wallet.get_child_for_path(path)
        chain_code = child_wallet.chain_code.decode("utf-8")
        pub_key = child_wallet.public_key.get_key().decode("utf-8")
        return self._register(registration_token, pub_key, chain_code)

    def testStartRegistrationToken(self):
        """The registration token carries the expected header and claims."""
        registration_token = self._start_registration(self.token, self.base_address_hash)
        header = jwt.get_unverified_header(registration_token)
        registration_data = self._get_data_unsafe(registration_token)
        self.assertEqual(header["alg"], "HS512")
        self.assertEqual(registration_data["aud"], "Registration")
        self.assertEqual(registration_data["iss"], "Neufund")
        self.assertEqual(registration_data["base_address_hash"], self.base_address_hash)
        self.assertIn("path", registration_data)

    def testStartRegistrationPath(self):
        """The derivation path is the base path plus three hardened segments."""
        registration_token = self._start_registration(self.token, self.base_address_hash)
        registration_data = self._get_data_unsafe(registration_token)
        path = registration_data["path"]
        # assertRegexpMatches was a deprecated alias removed in Python 3.12.
        self.assertRegex(path, LEDGER_BASE_PATH + "(/\\d{1,10}'){3}")

    def testRegistrationSucceeds(self):
        response = self._register_new_wallet(self.base_address_hash, self.new_wallet)
        self.assertEqual(response, self.base_address_hash)

    def testLoginAfterRegistration(self):
        self._register_new_wallet(self.base_address_hash, self.new_wallet)
        self._login(self.base_address_hash, wallet=self.new_wallet)
| 42.978947 | 100 | 0.692138 | import hashlib
import json
import unittest
from datetime import datetime, timedelta
import jwt
from multimerchant.wallet import Wallet
from multimerchant.wallet.keys import PublicKey
import db
from config import LEDGER_BASE_PATH
from server import app
from utils import pub_to_addr, wallet_to_addr
TEST_DB = "test.json"
class UtilsTestCase(unittest.TestCase):
def testPubToAddr(self):
pub_key = "04d3c41fb2f0e07d71f10416717e450bceb635d54d9b07dea0327f90bfa82f0da" \
"08b40aafd480811d4aba8c17fa768765c6a897009e000f9249c299724fd567414"
address = "0x670884349dd0e57bd1bb71bb6913e921846ba149"
self.assertEqual(pub_to_addr(pub_key), address)
class LedgerJWTServerTestsBase(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
app.config["DB_NAME"] = TEST_DB
def _request_challenge(self, base_address_hash):
return self.app.post(
'/challenge',
data=json.dumps({"base_address_hash": base_address_hash}),
content_type='application/json'
).data.decode("utf-8")
def _solve_challenge(self, token, response_address):
return self.app.post(
'/response',
data=json.dumps({"address": response_address}),
headers={"Authorization": "JWT {}".format(token)},
content_type='application/json'
).data.decode("utf-8")
def _get_user_data(self, token):
return self.app.get(
'/data',
headers={"Authorization": "JWT {}".format(token)},
content_type='application/json'
).data.decode("utf-8")
@staticmethod
def _get_data_unsafe(signed):
return jwt.decode(signed, options={
'verify_signature': False,
'verify_aud': False
})
def _login(self, base_address_hash, x_wallet=None, wallet=None):
signed_challenge = self._request_challenge(base_address_hash)
challenge = self._get_data_unsafe(signed_challenge)
path = challenge["path"]
if wallet:
child = wallet.get_child_for_path(path)
else:
x_y_path = path[len(LEDGER_BASE_PATH) + 1:]
y_path = "/".join(x_y_path.split('/')[3:])
child = x_wallet.get_child_for_path(y_path)
address = wallet_to_addr(child)
return self._solve_challenge(signed_challenge, address)
@staticmethod
def _timestamp(time):
return int(time.strftime("%s"))
class ChallengeResponseTests(LedgerJWTServerTestsBase):
def setUp(self):
super(ChallengeResponseTests, self).setUp()
self.base_address_hash = "01b0021097fc768ec42c1828be5131e18b479ab210224122e467f144018396df"
test_data = db.get(self.base_address_hash)
self.x_wallet = Wallet(chain_code=test_data["chainCode"],
public_key=PublicKey.from_hex_key(test_data["pubKey"]))
def testChallenge(self):
signed_challenge = self._request_challenge(self.base_address_hash)
header = jwt.get_unverified_header(signed_challenge)
challenge = self._get_data_unsafe(signed_challenge)
self.assertEqual(header["alg"], "HS512")
self.assertEqual(challenge["aud"], "Challenge")
self.assertEqual(challenge["iss"], "Neufund")
self.assertEqual(challenge["base_address_hash"], self.base_address_hash)
self.assertIn("path", challenge)
def testChallengeTimeout(self):
signed_challenge = self._request_challenge(self.base_address_hash)
challenge = self._get_data_unsafe(signed_challenge)
now_plus_55_sec = self._timestamp(datetime.now() + timedelta(seconds=55))
now_plus_65_sec = self._timestamp(datetime.now() + timedelta(seconds=65))
self.assertIn(challenge["exp"], range(now_plus_55_sec, now_plus_65_sec))
def testChallengeResponse(self):
signed_token = self._login(self.base_address_hash, x_wallet=self.x_wallet)
header = jwt.get_unverified_header(signed_token)
token = self._get_data_unsafe(signed_token)
self.assertEqual(header["alg"], "ES512")
self.assertEqual(token['aud'], "MS2")
def testTokenTimeout(self):
signed_token = self._login(self.base_address_hash, x_wallet=self.x_wallet)
token = self._get_data_unsafe(signed_token)
now_plus_25_min = self._timestamp(datetime.now() + timedelta(minutes=25))
now_plus_35_min = self._timestamp(datetime.now() + timedelta(minutes=35))
self.assertIn(token['exp'], range(now_plus_25_min, now_plus_35_min))
class StateModifyingTestCaseMixin():
def setUp(self):
super(StateModifyingTestCaseMixin, self).setUp()
self.initial_db_state = db.read()
def tearDown(self):
super(StateModifyingTestCaseMixin, self).tearDown()
db.write(self.initial_db_state)
class AdminTests(StateModifyingTestCaseMixin, LedgerJWTServerTestsBase):
def setUp(self):
super(AdminTests, self).setUp()
admin_base_address_hash = "01b0021097fc768ec42c1828be5131e18b479ab210224122e467f144018396df"
test_data = db.get(admin_base_address_hash)
admin_x_wallet = Wallet(chain_code=test_data["chainCode"],
public_key=PublicKey.from_hex_key(test_data["pubKey"]))
self.token = self._login(admin_base_address_hash, x_wallet=admin_x_wallet)
self.new_wallet = Wallet.new_random_wallet()
base_address = wallet_to_addr(self.new_wallet.get_child_for_path(LEDGER_BASE_PATH))
self.base_address_hash = hashlib.sha3_256(base_address.encode("utf-8")).hexdigest()
def _start_registration(self, token, base_address_hash):
return self.app.post(
'/start_registration',
data=json.dumps({"base_address_hash": base_address_hash}),
headers={"Authorization": "JWT {}".format(token)},
content_type='application/json'
).data.decode("utf-8")
def _register(self, registration_token, pub_key, chain_code):
return self.app.post(
'/register',
data=json.dumps({"x_pub_key": pub_key,
"x_chain_code": chain_code}),
headers={"Authorization": "JWT {}".format(registration_token)},
content_type='application/json'
).data.decode("utf-8")
def _register_new_wallet(self, base_address_hash, wallet):
registration_token = self._start_registration(self.token, base_address_hash)
path = self._get_data_unsafe(registration_token)["path"]
child_wallet = wallet.get_child_for_path(path)
chain_code = child_wallet.chain_code.decode("utf-8")
pub_key = child_wallet.public_key.get_key().decode("utf-8")
return self._register(registration_token, pub_key, chain_code)
def testStartRegistrationToken(self):
registration_token = self._start_registration(self.token, self.base_address_hash)
header = jwt.get_unverified_header(registration_token)
registration_data = self._get_data_unsafe(registration_token)
self.assertEqual(header["alg"], "HS512")
self.assertEqual(registration_data["aud"], "Registration")
self.assertEqual(registration_data["iss"], "Neufund")
self.assertEqual(registration_data["base_address_hash"], self.base_address_hash)
self.assertIn("path", registration_data)
def testStartRegistrationPath(self):
registration_token = self._start_registration(self.token, self.base_address_hash)
registration_data = self._get_data_unsafe(registration_token)
path = registration_data["path"]
self.assertRegexpMatches(path, LEDGER_BASE_PATH + "(/\\d{1,10}'){3}")
def testRegistrationSucceeds(self):
response = self._register_new_wallet(self.base_address_hash, self.new_wallet)
self.assertEqual(response, self.base_address_hash)
def testLoginAfterRegistration(self):
self._register_new_wallet(self.base_address_hash, self.new_wallet)
self._login(self.base_address_hash, wallet=self.new_wallet)
| true | true |
1c3cedc18c59521453aa7bf4124d6ee5252fddae | 610 | py | Python | bas03_lo_temp.py | bokunimowakaru/xbee3_micropython | cb18e8dbf72749b70dfc5bd70de6dfa598fefaa2 | [
"MIT"
] | 1 | 2021-09-29T13:32:18.000Z | 2021-09-29T13:32:18.000Z | bas03_lo_temp.py | bokunimowakaru/xbee3_micropython | cb18e8dbf72749b70dfc5bd70de6dfa598fefaa2 | [
"MIT"
] | null | null | null | bas03_lo_temp.py | bokunimowakaru/xbee3_micropython | cb18e8dbf72749b70dfc5bd70de6dfa598fefaa2 | [
"MIT"
] | 2 | 2019-07-18T19:27:36.000Z | 2020-07-16T10:50:56.000Z | # MicroPython XBee3 ZigBee
# coding: utf-8
'''
内蔵温度センサと電源電圧の値を読み取る
Copyright (c) 2018-2019 Wataru KUNINO
'''
import xbee
import time
TEMP_OFFSET=14.0                                        # internal temperature rise of the module, subtracted from the raw reading
def getTemp():                                          # read the module's built-in sensors
    """Return the XBee module temperature and supply voltage as a dict."""
    temp = xbee.atcmd('TP') - TEMP_OFFSET               # temperature from the XBee module, offset-corrected
    volt = xbee.atcmd('%V') / 1000                      # supply voltage from the XBee module, scaled to volts
    return {'temp':temp, 'volt':volt}                   # both readings in one dict
while True:                                             # sample forever
    print(getTemp())                                    # read and print the sensor values
    time.sleep_ms(3000)                                 # wait 3 seconds between samples
| 32.105263 | 87 | 0.511475 |
import xbee
import time
TEMP_OFFSET = 14.0  # self-heating offset subtracted from the raw reading


def getTemp():
    """Return the XBee module temperature and supply voltage as a dict."""
    reading_c = xbee.atcmd('TP') - TEMP_OFFSET
    supply_v = xbee.atcmd('%V') / 1000
    return {'temp': reading_c, 'volt': supply_v}
# Main loop: print the sensor readings every 3 seconds, forever.
while True:
    print(getTemp())
    time.sleep_ms(3000)
| true | true |
1c3ceec81e623535b8049484686f29b4c59b1e81 | 368 | py | Python | setting.py | arthur37231/bilibili-followers-checker | 11da0d1e8c0381162b8eb407fad98962109e3c52 | [
"MIT"
] | null | null | null | setting.py | arthur37231/bilibili-followers-checker | 11da0d1e8c0381162b8eb407fad98962109e3c52 | [
"MIT"
] | null | null | null | setting.py | arthur37231/bilibili-followers-checker | 11da0d1e8c0381162b8eb407fad98962109e3c52 | [
"MIT"
] | null | null | null | FOLDER_RESOURCE = "data"
PATH_RESOURCE = "/static"          # URL path under which resources are served
SUB_FOLDER_IMAGE = "img"           # image sub-folder inside the resource folder
EXPIRATION_TOKEN_HOURS = 24*7      # auth-token lifetime: one week, in hours
# PostgreSQL connection settings; replace the placeholder credentials.
CONF_POSTGRES = {
    'db_type': 'postgresql',
    'host': 'localhost',
    'port': 5432,
    'user': 'USERNAME',  # PostgreSQL username
    'db_name': 'DBNAME',  # PostgreSQL Database Name
    'db_password': 'PASSWORD'  # PostgreSQL password
}
| 23 | 58 | 0.638587 | FOLDER_RESOURCE = "data"
PATH_RESOURCE = "/static"
SUB_FOLDER_IMAGE = "img"
EXPIRATION_TOKEN_HOURS = 24*7
CONF_POSTGRES = {
'db_type': 'postgresql',
'host': 'localhost',
'port': 5432,
'user': 'USERNAME',
'db_name': 'DBNAME',
'db_password': 'PASSWORD'
}
| true | true |
1c3cf1fd77aec93d54823cbc15420e3cc520a433 | 32,432 | py | Python | pandapower/estimation/ppc_conversions.py | HaoranDennis/pandapower | 22c8680d3373879e792fe7478bd2dde4ea8cb018 | [
"BSD-3-Clause"
] | 1 | 2019-03-14T05:27:43.000Z | 2019-03-14T05:27:43.000Z | pandapower/estimation/ppc_conversions.py | HaoranDennis/pandapower | 22c8680d3373879e792fe7478bd2dde4ea8cb018 | [
"BSD-3-Clause"
] | null | null | null | pandapower/estimation/ppc_conversions.py | HaoranDennis/pandapower | 22c8680d3373879e792fe7478bd2dde4ea8cb018 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2019 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pandas as pd
from pandapower.auxiliary import _select_is_elements_numba, _add_ppc_options, _add_auxiliary_elements
from pandapower.pd2ppc import _pd2ppc
from pandapower.estimation.idx_bus import *
from pandapower.estimation.idx_brch import *
from pandapower.pypower.idx_brch import branch_cols
from pandapower.pypower.idx_bus import bus_cols
from pandapower.pf.run_newton_raphson_pf import _run_dc_pf
from pandapower.run import rundcpp
from pandapower.build_branch import get_is_lines
from pandapower.create import create_buses, create_line_from_parameters
try:
import pplog as logging
except ImportError:
import logging
std_logger = logging.getLogger(__name__)
# Name tags used to mark the auxiliary bus/line/switch elements created by
# _add_aux_elements_for_bb_switch, so they can be removed again afterwards.
AUX_BUS_NAME, AUX_LINE_NAME, AUX_SWITCH_NAME =\
    "aux_bus_se", "aux_line_se", "aux_bbswitch_se"
def _add_aux_elements_for_bb_switch(net, bus_to_be_fused):
    """
    Add auxiliary elements (bus, bb switch, line) to the pandapower net to avoid
    automatic fuse of buses connected with bb switch with elements on it

    Without these elements, pd2ppc merges buses joined by closed bus-bus
    switches into one ppc bus, so per-bus results for the merged buses
    become indistinguishable in the estimation.
    :param net: pandapower net
    :param bus_to_be_fused: bus indices whose clusters are allowed to stay
        fused (no auxiliary elements created for them), or None
    :return: None
    """
    def get_bus_branch_mapping(net, bus_to_be_fused):
        # For every pandapower bus: its ppci bus, whether it carries an
        # element, how many element-carrying buses share its ppci cluster,
        # its rated voltage, and whether its cluster may stay fused.
        bus_with_elements = set(net.load.bus).union(set(net.sgen.bus)).union(
            set(net.shunt.bus)).union(set(net.gen.bus)).union(
                set(net.ext_grid.bus)).union(set(net.ward.bus)).union(
                    set(net.xward.bus))
#        bus_with_pq_measurement = set(net.measurement[(net.measurement.measurement_type=='p')&(net.measurement.element_type=='bus')].element.values)
#        bus_with_elements = bus_with_elements.union(bus_with_pq_measurement)

        bus_ppci = pd.DataFrame(data=net._pd2ppc_lookups['bus'], columns=["bus_ppci"])
        bus_ppci['bus_with_elements'] = bus_ppci.index.isin(bus_with_elements)
        # The lookup may contain entries beyond net.bus; take vn_kv only
        # for buses that actually exist in the pandapower net.
        existed_bus = bus_ppci[bus_ppci.index.isin(net.bus.index)]
        bus_ppci['vn_kv'] = net.bus.loc[existed_bus.index, 'vn_kv']
        # Count element-carrying buses per ppci cluster and broadcast the
        # count back onto every member of the cluster.
        ppci_bus_with_elements = bus_ppci.groupby('bus_ppci')['bus_with_elements'].sum()
        bus_ppci.loc[:, 'elements_in_cluster'] = ppci_bus_with_elements[bus_ppci['bus_ppci'].values].values
        bus_ppci['bus_to_be_fused'] = False
        if bus_to_be_fused is not None:
            # Mark the requested buses, then propagate the flag to every
            # bus in the same ppci cluster.
            bus_ppci.loc[bus_to_be_fused, 'bus_to_be_fused'] = True
            bus_cluster_to_be_fused_mask = bus_ppci.groupby('bus_ppci')['bus_to_be_fused'].any()
            bus_ppci.loc[bus_cluster_to_be_fused_mask[bus_ppci['bus_ppci'].values].values, 'bus_to_be_fused'] = True
        return bus_ppci

    # find the buses which was fused together in the pp2ppc conversion with elements on them
    # the first one will be skipped
    rundcpp(net)
    bus_ppci_mapping = get_bus_branch_mapping(net, bus_to_be_fused)
    bus_to_be_handled = bus_ppci_mapping[(bus_ppci_mapping ['elements_in_cluster']>=2)&\
                                         bus_ppci_mapping ['bus_with_elements']&\
                                         (~bus_ppci_mapping ['bus_to_be_fused'])]
    bus_to_be_handled = bus_to_be_handled[bus_to_be_handled['bus_ppci'].duplicated(keep='first')]

    # create auxiliary buses for the buses need to be handled
    aux_bus_index = create_buses(net, bus_to_be_handled.shape[0], bus_to_be_handled.vn_kv.values,
                                 name=AUX_BUS_NAME)
    # maps original bus index -> its auxiliary bus index
    bus_aux_mapping = pd.Series(aux_bus_index, index=bus_to_be_handled.index.values)

    # create auxiliary switched and disable original switches connected to the related buses;
    # the original closed-state is kept so _drop_aux_elements_for_bb_switch can restore it
    net.switch.loc[:, 'original_closed'] = net.switch.loc[:, 'closed']
    switch_to_be_replaced_sel = ((net.switch.et == 'b') &
                                 (net.switch.element.isin(bus_to_be_handled.index) |
                                  net.switch.bus.isin(bus_to_be_handled.index)))
    net.switch.loc[switch_to_be_replaced_sel, 'closed'] = False

    # create aux switches with selecting the existed switches
    aux_switch = net.switch.loc[switch_to_be_replaced_sel, ['bus', 'closed', 'element',
                                                            'et', 'name', 'original_closed']]
    aux_switch.loc[:,'name'] = AUX_SWITCH_NAME

    # replace the original bus with the correspondent auxiliary bus
    bus_to_be_replaced = aux_switch.loc[aux_switch.bus.isin(bus_to_be_handled.index), 'bus']
    element_to_be_replaced = aux_switch.loc[aux_switch.element.isin(bus_to_be_handled.index), 'element']
    aux_switch.loc[bus_to_be_replaced.index, 'bus'] =\
        bus_aux_mapping[bus_to_be_replaced].values.astype(int)
    aux_switch.loc[element_to_be_replaced.index, 'element'] =\
        bus_aux_mapping[element_to_be_replaced].values.astype(int)
    # aux switches take over the original closed-state
    aux_switch['closed'] = aux_switch['original_closed']
    net.switch = net.switch.append(aux_switch, ignore_index=True)
#    PY34 compatibility
#    net.switch = net.switch.append(aux_switch, ignore_index=True, sort=False)

    # create auxiliary lines as small impedance
    for bus_ori, bus_aux in bus_aux_mapping.iteritems():
        create_line_from_parameters(net, bus_ori, bus_aux, length_km=1, name=AUX_LINE_NAME,
                                    r_ohm_per_km=0.15, x_ohm_per_km=0.2, c_nf_per_km=0, max_i_ka=1)
def _drop_aux_elements_for_bb_switch(net):
    """
    Remove the auxiliary elements (bus, bb switch, line) that
    _add_aux_elements_for_bb_switch added to the net.
    :param net: pandapower net
    :return: None
    """
    # Discard the auxiliary switches, then restore the saved closed-state
    # of the remaining (original) switches.
    keep_switch = net.switch.name != AUX_SWITCH_NAME
    net.switch = net.switch[keep_switch]
    if 'original_closed' in net.switch.columns:
        net.switch.loc[:, 'closed'] = net.switch.loc[:, 'original_closed']
        net.switch.drop('original_closed', axis=1, inplace=True)

    # Row masks selecting the non-auxiliary buses and lines.
    keep_bus = (net.bus.name != AUX_BUS_NAME).values
    keep_line = (net.line.name != AUX_LINE_NAME).values

    # Strip the auxiliary rows from every bus/line result table as well.
    for key in net.keys():
        if key.startswith('res_bus'):
            net[key] = net[key].loc[keep_bus, :]
        if key.startswith('res_line'):
            net[key] = net[key].loc[keep_line, :]

    net.bus = net.bus.loc[keep_bus, :]
    net.line = net.line.loc[keep_line, :]
def _init_ppc(net, v_start, delta_start, calculate_voltage_angles):
    """Convert the pandapower net into ppc/ppci for the state estimation.

    :param net: pandapower net
    :param v_start: initial bus voltage magnitude(s) in p.u.
    :param delta_start: initial bus voltage angle(s) in degrees
    :param calculate_voltage_angles: whether voltage angles are considered
    :return: tuple (ppc, ppci)
    """
    # select elements in service and convert pandapower ppc to ppc
    net._options = {}
    _add_ppc_options(net, check_connectivity=False, init_vm_pu=v_start, init_va_degree=delta_start,
                     trafo_model="pi", mode="pf", enforce_q_lims=False,
                     calculate_voltage_angles=calculate_voltage_angles, r_switch=0.0,
                     recycle=dict(_is_elements=False, ppc=False, Ybus=False))
    net["_is_elements"] = _select_is_elements_numba(net)
    _add_auxiliary_elements(net)
    ppc, ppci = _pd2ppc(net)

    # do dc power flow for phase shifting transformers
    if np.any(net.trafo.shift_degree):
        # Zero the bus injections (columns 2/3 -- PD/QD in the pypower bus
        # matrix convention) before the DC PF, which initializes the bus
        # angles; afterwards restore the saved voltage magnitudes (col 7, VM).
        vm_backup = ppci["bus"][:, 7].copy()
        ppci["bus"][:, [2, 3]] = 0.
        ppci = _run_dc_pf(ppci)
        ppci["bus"][:, 7] = vm_backup
    return ppc, ppci
def _add_measurements_to_ppc(net, ppci, zero_injection):
    """
    Add pandapower measurements to the ppci structure by adding new columns
    :param net: pandapower net
    :param ppci: generated ppci
    :param zero_injection: controls which buses are flagged as zero injection
        (forwarded to _add_zero_injection)
    :return: ppci with the state-estimation measurement columns appended to
        ppci["bus"] and ppci["branch"]
    """
    # Normalize the "side" column: "from"/"to" and "hv"/"mv"/"lv" labels are
    # resolved to the actual pandapower bus index of that element side.
    meas = net.measurement.copy(deep=False)
    meas["side"] = meas.apply(lambda row:
                              net['line']["{}_bus".format(row["side"])].loc[row["element"]] if
                              row["side"] in ("from", "to") else
                              net[row["element_type"]][row["side"]+'_bus'].loc[row["element"]] if
                              row["side"] in ("hv", "mv", "lv") else row["side"], axis=1)
    map_bus = net["_pd2ppc_lookups"]["bus"]
    meas_bus = meas[(meas['element_type'] == 'bus')]
    # Drop bus measurements whose mapped ppci bus index is out of range
    # (the bus does not exist in the reduced ppci).
    if (map_bus[meas_bus['element'].values.astype(int)] >= ppci["bus"].shape[0]).any():
        std_logger.warning("Measurement defined in pp-grid does not exist in ppci! Will be deleted!")
        meas_bus = meas_bus[map_bus[meas_bus['element'].values.astype(int)] < ppci["bus"].shape[0]]
    # mapping to dict instead of np array ensures good performance for large indices
    # (e.g., 999999999 requires a large np array even if there are only 2 buses)
    # downside is loop comprehension to access the map
    map_line, map_trafo, map_trafo3w = None, None, None
    branch_mask = ppci['internal']['branch_is']
    if "line" in net["_pd2ppc_lookups"]["branch"]:
        map_line = {line_ix: br_ix for line_ix, br_ix in
                    zip(net.line.index, range(*net["_pd2ppc_lookups"]["branch"]["line"])) if branch_mask[br_ix]}
    if "trafo" in net["_pd2ppc_lookups"]["branch"]:
        trafo_ix_start, trafo_ix_end = net["_pd2ppc_lookups"]["branch"]["trafo"]
        # shift the trafo range left by the number of out-of-service branches
        # preceding it in the full branch table
        trafo_ix_offset = np.sum(~branch_mask[:trafo_ix_start])
        trafo_ix_start, trafo_ix_end = trafo_ix_start - trafo_ix_offset, trafo_ix_end - trafo_ix_offset
        map_trafo = {trafo_ix: br_ix for trafo_ix, br_ix in
                     zip(net.trafo.index, range(trafo_ix_start, trafo_ix_end))
                     if branch_mask[br_ix+trafo_ix_offset]}
    if "trafo3w" in net["_pd2ppc_lookups"]["branch"]:
        trafo3w_ix_start, trafo3w_ix_end = net["_pd2ppc_lookups"]["branch"]["trafo3w"]
        trafo3w_ix_offset = np.sum(~branch_mask[:trafo3w_ix_start])
        num_trafo3w = net.trafo3w.shape[0]
        trafo3w_ix_start, trafo3w_ix_end = trafo3w_ix_start - trafo3w_ix_offset, trafo3w_ix_end - trafo3w_ix_offset
        # each trafo3w occupies three consecutive ppci branch blocks: hv, mv, lv
        map_trafo3w = {trafo3w_ix: {'hv': br_ix, 'mv': br_ix+num_trafo3w, 'lv': br_ix+2*num_trafo3w}
                       for trafo3w_ix, br_ix in
                       zip(net.trafo3w.index, range(trafo3w_ix_start, trafo3w_ix_start+num_trafo3w))
                       if branch_mask[br_ix+trafo3w_ix_offset]}
    # set measurements for ppc format
    # add 9 columns to ppc[bus] for Vm, Vm std dev, P, P std dev, Q, Q std dev,
    # pandapower measurement indices V, P, Q
    bus_append = np.full((ppci["bus"].shape[0], bus_cols_se), np.nan, dtype=ppci["bus"].dtype)
    v_measurements = meas_bus[(meas_bus.measurement_type == "v")]
    if len(v_measurements):
        bus_positions = map_bus[v_measurements.element.values.astype(int)]
        bus_append[bus_positions, VM] = v_measurements.value.values
        bus_append[bus_positions, VM_STD] = v_measurements.std_dev.values
        bus_append[bus_positions, VM_IDX] = v_measurements.index.values
    p_measurements = meas_bus[(meas_bus.measurement_type == "p")]
    if len(p_measurements):
        bus_positions = map_bus[p_measurements.element.values.astype(int)]
        unique_bus_positions = np.unique(bus_positions)
        # Several pp buses fused into one ppci bus: sum the values, keep the
        # largest std dev and the first pandapower index.
        if len(unique_bus_positions) < len(bus_positions):
            std_logger.warning("P Measurement duplication will be automatically merged!")
            for bus in unique_bus_positions:
                p_meas_on_bus = p_measurements.iloc[np.argwhere(bus_positions==bus).ravel(), :]
                bus_append[bus, P] = p_meas_on_bus.value.sum()
                bus_append[bus, P_STD] = p_meas_on_bus.std_dev.max()
                bus_append[bus, P_IDX] = p_meas_on_bus.index[0]
        else:
            bus_append[bus_positions, P] = p_measurements.value.values
            bus_append[bus_positions, P_STD] = p_measurements.std_dev.values
            bus_append[bus_positions, P_IDX] = p_measurements.index.values
    q_measurements = meas_bus[(meas_bus.measurement_type == "q")]
    if len(q_measurements):
        bus_positions = map_bus[q_measurements.element.values.astype(int)]
        unique_bus_positions = np.unique(bus_positions)
        if len(unique_bus_positions) < len(bus_positions):
            std_logger.warning("Q Measurement duplication will be automatically merged!")
            for bus in unique_bus_positions:
                q_meas_on_bus = q_measurements.iloc[np.argwhere(bus_positions==bus).ravel(), :]
                bus_append[bus, Q] = q_meas_on_bus.value.sum()
                bus_append[bus, Q_STD] = q_meas_on_bus.std_dev.max()
                bus_append[bus, Q_IDX] = q_meas_on_bus.index[0]
        else:
            # NOTE(review): bus_positions is recomputed here although the value
            # computed above is still valid (harmless, just redundant)
            bus_positions = map_bus[q_measurements.element.values.astype(int)]
            bus_append[bus_positions, Q] = q_measurements.value.values
            bus_append[bus_positions, Q_STD] = q_measurements.std_dev.values
            bus_append[bus_positions, Q_IDX] = q_measurements.index.values
    #add zero injection measurement and labels defined in parameter zero_injection
    bus_append = _add_zero_injection(net, ppci, bus_append, zero_injection)
    # add virtual measurements for artificial buses, which were created because
    # of an open line switch. p/q are 0. and std dev is 1. (small value)
    new_in_line_buses = np.setdiff1d(np.arange(ppci["bus"].shape[0]), map_bus[map_bus >= 0])
    bus_append[new_in_line_buses, 2] = 0.
    bus_append[new_in_line_buses, 3] = 1.
    bus_append[new_in_line_buses, 4] = 0.
    bus_append[new_in_line_buses, 5] = 1.
    # add 15 columns to mpc[branch] for Im_from, Im_from std dev, Im_to, Im_to std dev,
    # P_from, P_from std dev, P_to, P_to std dev, Q_from, Q_from std dev, Q_to, Q_to std dev,
    # pandapower measurement index I, P, Q
    branch_append = np.full((ppci["branch"].shape[0], branch_cols_se),
                            np.nan, dtype=ppci["branch"].dtype)
    if map_line is not None:
        i_measurements = meas[(meas.measurement_type == "i") & (meas.element_type == "line") &\
                              meas.element.isin(map_line)]
        if len(i_measurements):
            # split measurements by which line end the (resolved) side points at
            meas_from = i_measurements[(i_measurements.side.values.astype(int) ==
                                        net.line.from_bus[i_measurements.element]).values]
            meas_to = i_measurements[(i_measurements.side.values.astype(int) ==
                                      net.line.to_bus[i_measurements.element]).values]
            ix_from = [map_line[l] for l in meas_from.element.values.astype(int)]
            ix_to = [map_line[l] for l in meas_to.element.values.astype(int)]
            # conversion factor for the current measurement (vn_kv * 1e3, see
            # variable name "i_ka_to_pu")
            i_ka_to_pu_from = (net.bus.vn_kv[meas_from.side]).values * 1e3
            i_ka_to_pu_to = (net.bus.vn_kv[meas_to.side]).values * 1e3
            branch_append[ix_from, IM_FROM] = meas_from.value.values * i_ka_to_pu_from
            branch_append[ix_from, IM_FROM_STD] = meas_from.std_dev.values * i_ka_to_pu_from
            branch_append[ix_from, IM_FROM_IDX] = meas_from.index.values
            branch_append[ix_to, IM_TO] = meas_to.value.values * i_ka_to_pu_to
            branch_append[ix_to, IM_TO_STD] = meas_to.std_dev.values * i_ka_to_pu_to
            branch_append[ix_to, IM_TO_IDX] = meas_to.index.values
        p_measurements = meas[(meas.measurement_type == "p") & (meas.element_type == "line") &
                              meas.element.isin(map_line)]
        if len(p_measurements):
            meas_from = p_measurements[(p_measurements.side.values.astype(int) ==
                                        net.line.from_bus[p_measurements.element]).values]
            meas_to = p_measurements[(p_measurements.side.values.astype(int) ==
                                      net.line.to_bus[p_measurements.element]).values]
            ix_from = [map_line[l] for l in meas_from.element.values.astype(int)]
            ix_to = [map_line[l] for l in meas_to.element.values.astype(int)]
            branch_append[ix_from, P_FROM] = meas_from.value.values
            branch_append[ix_from, P_FROM_STD] = meas_from.std_dev.values
            branch_append[ix_from, P_FROM_IDX] = meas_from.index.values
            branch_append[ix_to, P_TO] = meas_to.value.values
            branch_append[ix_to, P_TO_STD] = meas_to.std_dev.values
            branch_append[ix_to, P_TO_IDX] = meas_to.index.values
        q_measurements = meas[(meas.measurement_type == "q") & (meas.element_type == "line") &
                              meas.element.isin(map_line)]
        if len(q_measurements):
            meas_from = q_measurements[(q_measurements.side.values.astype(int) ==
                                        net.line.from_bus[q_measurements.element]).values]
            meas_to = q_measurements[(q_measurements.side.values.astype(int) ==
                                      net.line.to_bus[q_measurements.element]).values]
            ix_from = [map_line[l] for l in meas_from.element.values.astype(int)]
            ix_to = [map_line[l] for l in meas_to.element.values.astype(int)]
            branch_append[ix_from, Q_FROM] = meas_from.value.values
            branch_append[ix_from, Q_FROM_STD] = meas_from.std_dev.values
            branch_append[ix_from, Q_FROM_IDX] = meas_from.index.values
            branch_append[ix_to, Q_TO] = meas_to.value.values
            branch_append[ix_to, Q_TO_STD] = meas_to.std_dev.values
            branch_append[ix_to, Q_TO_IDX] = meas_to.index.values
    # TODO review in 2019 -> is this a use case? create test with switches on lines
    # determine number of lines in ppci["branch"]
    # out of service lines and lines with open switches at both ends are not in the ppci
    # _is_elements = net["_is_elements"]
    # if "line" not in _is_elements:
    # get_is_lines(net)
    # lines_is = _is_elements['line']
    # bus_is_idx = _is_elements['bus_is_idx']
    # slidx = (net["switch"]["closed"].values == 0) \
    # & (net["switch"]["et"].values == "l") \
    # & (np.in1d(net["switch"]["element"].values, lines_is.index)) \
    # & (np.in1d(net["switch"]["bus"].values, bus_is_idx))
    # ppci_lines = len(lines_is) - np.count_nonzero(slidx)
    if map_trafo is not None:
        # two-winding transformers: hv side maps to FROM, lv side to TO
        i_tr_measurements = meas[(meas.measurement_type == "i") & (meas.element_type == "trafo") &
                                 meas.element.isin(map_trafo)]
        if len(i_tr_measurements):
            meas_from = i_tr_measurements[(i_tr_measurements.side.values.astype(int) ==
                                           net.trafo.hv_bus[i_tr_measurements.element]).values]
            meas_to = i_tr_measurements[(i_tr_measurements.side.values.astype(int) ==
                                         net.trafo.lv_bus[i_tr_measurements.element]).values]
            ix_from = [map_trafo[t] for t in meas_from.element.values.astype(int)]
            ix_to = [map_trafo[t] for t in meas_to.element.values.astype(int)]
            i_ka_to_pu_from = (net.bus.vn_kv[meas_from.side]).values * 1e3
            i_ka_to_pu_to = (net.bus.vn_kv[meas_to.side]).values * 1e3
            branch_append[ix_from, IM_FROM] = meas_from.value.values * i_ka_to_pu_from
            branch_append[ix_from, IM_FROM_STD] = meas_from.std_dev.values * i_ka_to_pu_from
            branch_append[ix_from, IM_FROM_IDX] = meas_from.index.values
            branch_append[ix_to, IM_TO] = meas_to.value.values * i_ka_to_pu_to
            branch_append[ix_to, IM_TO_STD] = meas_to.std_dev.values * i_ka_to_pu_to
            branch_append[ix_to, IM_TO_IDX] = meas_to.index.values
        p_tr_measurements = meas[(meas.measurement_type == "p") & (meas.element_type == "trafo") &
                                 meas.element.isin(map_trafo)]
        if len(p_tr_measurements):
            meas_from = p_tr_measurements[(p_tr_measurements.side.values.astype(int) ==
                                           net.trafo.hv_bus[p_tr_measurements.element]).values]
            meas_to = p_tr_measurements[(p_tr_measurements.side.values.astype(int) ==
                                         net.trafo.lv_bus[p_tr_measurements.element]).values]
            ix_from = [map_trafo[t] for t in meas_from.element.values.astype(int)]
            ix_to = [map_trafo[t] for t in meas_to.element.values.astype(int)]
            branch_append[ix_from, P_FROM] = meas_from.value.values
            branch_append[ix_from, P_FROM_STD] = meas_from.std_dev.values
            branch_append[ix_from, P_FROM_IDX] = meas_from.index.values
            branch_append[ix_to, P_TO] = meas_to.value.values
            branch_append[ix_to, P_TO_STD] = meas_to.std_dev.values
            branch_append[ix_to, P_TO_IDX] = meas_to.index.values
        q_tr_measurements = meas[(meas.measurement_type == "q") & (meas.element_type == "trafo") &
                                 meas.element.isin(map_trafo)]
        if len(q_tr_measurements):
            meas_from = q_tr_measurements[(q_tr_measurements.side.values.astype(int) ==
                                           net.trafo.hv_bus[q_tr_measurements.element]).values]
            meas_to = q_tr_measurements[(q_tr_measurements.side.values.astype(int) ==
                                         net.trafo.lv_bus[q_tr_measurements.element]).values]
            ix_from = [map_trafo[t] for t in meas_from.element.values.astype(int)]
            ix_to = [map_trafo[t] for t in meas_to.element.values.astype(int)]
            branch_append[ix_from, Q_FROM] = meas_from.value.values
            branch_append[ix_from, Q_FROM_STD] = meas_from.std_dev.values
            branch_append[ix_from, Q_FROM_IDX] = meas_from.index.values
            branch_append[ix_to, Q_TO] = meas_to.value.values
            branch_append[ix_to, Q_TO_STD] = meas_to.std_dev.values
            branch_append[ix_to, Q_TO_IDX] = meas_to.index.values
    # Add measurements for trafo3w
    # hv side is stored as the FROM side of the hv winding branch; mv and lv
    # sides as the TO side of their respective winding branches (see map_trafo3w)
    if map_trafo3w is not None:
        i_tr3w_measurements = meas[(meas.measurement_type == "i") & (meas.element_type == "trafo3w") &
                                   meas.element.isin(map_trafo3w)]
        if len(i_tr3w_measurements):
            meas_hv = i_tr3w_measurements[(i_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.hv_bus[i_tr3w_measurements.element]).values]
            meas_mv = i_tr3w_measurements[(i_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.mv_bus[i_tr3w_measurements.element]).values]
            meas_lv = i_tr3w_measurements[(i_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.lv_bus[i_tr3w_measurements.element]).values]
            ix_hv = [map_trafo3w[t]['hv'] for t in meas_hv.element.values.astype(int)]
            ix_mv = [map_trafo3w[t]['mv'] for t in meas_mv.element.values.astype(int)]
            ix_lv = [map_trafo3w[t]['lv'] for t in meas_lv.element.values.astype(int)]
            # NOTE(review): unlike the line/trafo cases above, the conversion
            # factor here omits the "* 1e3" — confirm whether trafo3w current
            # measurements are intentionally scaled differently
            i_ka_to_pu_hv = (net.bus.vn_kv[meas_hv.side]).values
            i_ka_to_pu_mv = (net.bus.vn_kv[meas_mv.side]).values
            i_ka_to_pu_lv = (net.bus.vn_kv[meas_lv.side]).values
            branch_append[ix_hv, IM_FROM] = meas_hv.value.values * i_ka_to_pu_hv
            branch_append[ix_hv, IM_FROM_STD] = meas_hv.std_dev.values * i_ka_to_pu_hv
            branch_append[ix_hv, IM_FROM_IDX] = meas_hv.index.values
            branch_append[ix_mv, IM_TO] = meas_mv.value.values * i_ka_to_pu_mv
            branch_append[ix_mv, IM_TO_STD] = meas_mv.std_dev.values * i_ka_to_pu_mv
            branch_append[ix_mv, IM_TO_IDX] = meas_mv.index.values
            branch_append[ix_lv, IM_TO] = meas_lv.value.values * i_ka_to_pu_lv
            branch_append[ix_lv, IM_TO_STD] = meas_lv.std_dev.values * i_ka_to_pu_lv
            branch_append[ix_lv, IM_TO_IDX] = meas_lv.index.values
        p_tr3w_measurements = meas[(meas.measurement_type == "p") & (meas.element_type == "trafo3w") &
                                   meas.element.isin(map_trafo3w)]
        if len(p_tr3w_measurements):
            meas_hv = p_tr3w_measurements[(p_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.hv_bus[p_tr3w_measurements.element]).values]
            meas_mv = p_tr3w_measurements[(p_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.mv_bus[p_tr3w_measurements.element]).values]
            meas_lv = p_tr3w_measurements[(p_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.lv_bus[p_tr3w_measurements.element]).values]
            ix_hv = [map_trafo3w[t]['hv'] for t in meas_hv.element.values.astype(int)]
            ix_mv = [map_trafo3w[t]['mv'] for t in meas_mv.element.values.astype(int)]
            ix_lv = [map_trafo3w[t]['lv'] for t in meas_lv.element.values.astype(int)]
            branch_append[ix_hv, P_FROM] = meas_hv.value.values
            branch_append[ix_hv, P_FROM_STD] = meas_hv.std_dev.values
            branch_append[ix_hv, P_FROM_IDX] = meas_hv.index.values
            branch_append[ix_mv, P_TO] = meas_mv.value.values
            branch_append[ix_mv, P_TO_STD] = meas_mv.std_dev.values
            branch_append[ix_mv, P_TO_IDX] = meas_mv.index.values
            branch_append[ix_lv, P_TO] = meas_lv.value.values
            branch_append[ix_lv, P_TO_STD] = meas_lv.std_dev.values
            branch_append[ix_lv, P_TO_IDX] = meas_lv.index.values
        q_tr3w_measurements = meas[(meas.measurement_type == "q") & (meas.element_type == "trafo3w") &
                                   meas.element.isin(map_trafo3w)]
        if len(q_tr3w_measurements):
            meas_hv = q_tr3w_measurements[(q_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.hv_bus[q_tr3w_measurements.element]).values]
            meas_mv = q_tr3w_measurements[(q_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.mv_bus[q_tr3w_measurements.element]).values]
            meas_lv = q_tr3w_measurements[(q_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.lv_bus[q_tr3w_measurements.element]).values]
            ix_hv = [map_trafo3w[t]['hv'] for t in meas_hv.element.values.astype(int)]
            ix_mv = [map_trafo3w[t]['mv'] for t in meas_mv.element.values.astype(int)]
            ix_lv = [map_trafo3w[t]['lv'] for t in meas_lv.element.values.astype(int)]
            branch_append[ix_hv, Q_FROM] = meas_hv.value.values
            branch_append[ix_hv, Q_FROM_STD] = meas_hv.std_dev.values
            branch_append[ix_hv, Q_FROM_IDX] = meas_hv.index.values
            branch_append[ix_mv, Q_TO] = meas_mv.value.values
            branch_append[ix_mv, Q_TO_STD] = meas_mv.std_dev.values
            branch_append[ix_mv, Q_TO_IDX] = meas_mv.index.values
            branch_append[ix_lv, Q_TO] = meas_lv.value.values
            branch_append[ix_lv, Q_TO_STD] = meas_lv.std_dev.values
            branch_append[ix_lv, Q_TO_IDX] = meas_lv.index.values
    # append the measurement columns to the ppci tables
    ppci["bus"] = np.hstack((ppci["bus"], bus_append))
    ppci["branch"] = np.hstack((ppci["branch"], branch_append))
    return ppci
def _add_zero_injection(net, ppci, bus_append, zero_injection):
    """
    Flag zero-injection buses in *bus_append* and attach virtual P/Q
    measurements (value 0, std dev 1) to every flagged bus.
    :param net: pandapower net
    :param ppci: generated ppci
    :param bus_append: measurement columns appended to the ppci bus table
    :param zero_injection: 'auto', 'aux_bus', an iterable of bus indices,
        or None to disable the flagging entirely
    :return bus_append: the modified column block
    """
    bus_append[:, ZERO_INJ_FLAG] = False
    if zero_injection is not None:
        # Auxiliary buses (everything in the aux lookup except xward) are
        # always treated as zero injection.
        aux_lookup = net._pd2ppc_lookups['aux']
        if aux_lookup:
            aux_pd_buses = np.concatenate([buses for name, buses in aux_lookup.items()
                                           if name != 'xward'])
            bus_append[net._pd2ppc_lookups['bus'][aux_pd_buses], ZERO_INJ_FLAG] = True
        if isinstance(zero_injection, str):
            if zero_injection == 'auto':
                # buses with type column == 1, zero entries in bus columns 2-5
                # and no P/Q measurement qualify automatically
                auto_mask = ((ppci["bus"][:, 1] == 1) &
                             (ppci["bus"][:, 2:6] == 0).all(axis=1) &
                             np.isnan(bus_append[:, P:(Q_STD+1)]).all(axis=1))
                bus_append[auto_mask, ZERO_INJ_FLAG] = True
            elif zero_injection != "aux_bus":
                raise UserWarning("zero injection parameter is not correctly initialized")
        elif hasattr(zero_injection, '__iter__'):
            # explicit list of pandapower bus indices
            bus_append[net._pd2ppc_lookups['bus'][zero_injection], ZERO_INJ_FLAG] = True

    # every flagged bus gets a virtual measurement: P = Q = 0 with std dev 1
    flagged = np.argwhere(bus_append[:, ZERO_INJ_FLAG]).ravel()
    bus_append[flagged, P] = 0
    bus_append[flagged, P_STD] = 1
    bus_append[flagged, Q] = 0
    bus_append[flagged, Q_STD] = 1
    return bus_append
def _build_measurement_vectors(ppci):
    """
    Build the measurement vector z, the pandapower-to-ppci measurement index
    mapping and the covariance vector R from the measurement columns that
    were appended to ppci["bus"] and ppci["branch"].
    :param ppci: generated ppci which contains the measurement columns
    :return: tuple (z, pp_meas_indices, r_cov)
    """
    # (table, value column, std dev column, pp index column), in the exact
    # order expected downstream: P, Q, V, I — each as bus / from-side / to-side.
    layout = [
        ("bus", bus_cols + P, bus_cols + P_STD, bus_cols + P_IDX),
        ("branch", branch_cols + P_FROM, branch_cols + P_FROM_STD, branch_cols + P_FROM_IDX),
        ("branch", branch_cols + P_TO, branch_cols + P_TO_STD, branch_cols + P_TO_IDX),
        ("bus", bus_cols + Q, bus_cols + Q_STD, bus_cols + Q_IDX),
        ("branch", branch_cols + Q_FROM, branch_cols + Q_FROM_STD, branch_cols + Q_FROM_IDX),
        ("branch", branch_cols + Q_TO, branch_cols + Q_TO_STD, branch_cols + Q_TO_IDX),
        ("bus", bus_cols + VM, bus_cols + VM_STD, bus_cols + VM_IDX),
        ("branch", branch_cols + IM_FROM, branch_cols + IM_FROM_STD, branch_cols + IM_FROM_IDX),
        ("branch", branch_cols + IM_TO, branch_cols + IM_TO_STD, branch_cols + IM_TO_IDX),
    ]
    z_parts, idx_parts, cov_parts = [], [], []
    for table, val_col, std_col, idx_col in layout:
        # rows that actually carry a measurement of this type
        available = ~np.isnan(ppci[table][:, val_col])
        z_parts.append(ppci[table][available, val_col])
        idx_parts.append(ppci[table][available, idx_col])
        cov_parts.append(ppci[table][available, std_col])
    z = np.concatenate(z_parts).real.astype(np.float64)
    pp_meas_indices = np.concatenate(idx_parts).real.astype(int)
    r_cov = np.concatenate(cov_parts).real.astype(np.float64)
    return z, pp_meas_indices, r_cov
| 60.734082 | 149 | 0.640047 |
import numpy as np
import pandas as pd
from pandapower.auxiliary import _select_is_elements_numba, _add_ppc_options, _add_auxiliary_elements
from pandapower.pd2ppc import _pd2ppc
from pandapower.estimation.idx_bus import *
from pandapower.estimation.idx_brch import *
from pandapower.pypower.idx_brch import branch_cols
from pandapower.pypower.idx_bus import bus_cols
from pandapower.pf.run_newton_raphson_pf import _run_dc_pf
from pandapower.run import rundcpp
from pandapower.build_branch import get_is_lines
from pandapower.create import create_buses, create_line_from_parameters
try:
import pplog as logging
except ImportError:
import logging
std_logger = logging.getLogger(__name__)
AUX_BUS_NAME, AUX_LINE_NAME, AUX_SWITCH_NAME =\
"aux_bus_se", "aux_line_se", "aux_bbswitch_se"
def _add_aux_elements_for_bb_switch(net, bus_to_be_fused):
    """
    Add auxiliary elements (buses, switches, small-impedance lines) so that
    closed bus-bus switches are represented explicitly instead of being fused.
    Buses listed in *bus_to_be_fused* keep the fused representation.
    :param net: pandapower net
    :param bus_to_be_fused: iterable of bus indices that stay fused, or None
    :return: None (net is modified in place)
    """
    def get_bus_branch_mapping(net, bus_to_be_fused):
        # buses that carry at least one connected element
        bus_with_elements = set(net.load.bus).union(set(net.sgen.bus)).union(
            set(net.shunt.bus)).union(set(net.gen.bus)).union(
            set(net.ext_grid.bus)).union(set(net.ward.bus)).union(
            set(net.xward.bus))
        # per pandapower bus: its ppci cluster, voltage level and element flag
        bus_ppci = pd.DataFrame(data=net._pd2ppc_lookups['bus'], columns=["bus_ppci"])
        bus_ppci['bus_with_elements'] = bus_ppci.index.isin(bus_with_elements)
        existed_bus = bus_ppci[bus_ppci.index.isin(net.bus.index)]
        bus_ppci['vn_kv'] = net.bus.loc[existed_bus.index, 'vn_kv']
        # number of element-carrying buses in each fused ppci cluster
        ppci_bus_with_elements = bus_ppci.groupby('bus_ppci')['bus_with_elements'].sum()
        bus_ppci.loc[:, 'elements_in_cluster'] = ppci_bus_with_elements[bus_ppci['bus_ppci'].values].values
        bus_ppci['bus_to_be_fused'] = False
        if bus_to_be_fused is not None:
            bus_ppci.loc[bus_to_be_fused, 'bus_to_be_fused'] = True
            # propagate the flag to every bus in the same ppci cluster
            bus_cluster_to_be_fused_mask = bus_ppci.groupby('bus_ppci')['bus_to_be_fused'].any()
            bus_ppci.loc[bus_cluster_to_be_fused_mask[bus_ppci['bus_ppci'].values].values, 'bus_to_be_fused'] = True
        return bus_ppci
    # a dc power flow fills net._pd2ppc_lookups, which the mapping needs
    rundcpp(net)
    bus_ppci_mapping = get_bus_branch_mapping(net, bus_to_be_fused)
    # element-carrying buses in clusters with >= 2 such buses, unless fused
    bus_to_be_handled = bus_ppci_mapping[(bus_ppci_mapping ['elements_in_cluster']>=2)&\
                                          bus_ppci_mapping ['bus_with_elements']&\
                                          (~bus_ppci_mapping ['bus_to_be_fused'])]
    # keep one representative per cluster; the duplicates get auxiliary buses
    bus_to_be_handled = bus_to_be_handled[bus_to_be_handled['bus_ppci'].duplicated(keep='first')]
    aux_bus_index = create_buses(net, bus_to_be_handled.shape[0], bus_to_be_handled.vn_kv.values,
                                 name=AUX_BUS_NAME)
    bus_aux_mapping = pd.Series(aux_bus_index, index=bus_to_be_handled.index.values)
    # save the switch status, then open the affected bus-bus switches and
    # re-create them as auxiliary switches towards the new auxiliary buses
    net.switch.loc[:, 'original_closed'] = net.switch.loc[:, 'closed']
    switch_to_be_replaced_sel = ((net.switch.et == 'b') &
                                 (net.switch.element.isin(bus_to_be_handled.index) |
                                  net.switch.bus.isin(bus_to_be_handled.index)))
    net.switch.loc[switch_to_be_replaced_sel, 'closed'] = False
    aux_switch = net.switch.loc[switch_to_be_replaced_sel, ['bus', 'closed', 'element',
                                                            'et', 'name', 'original_closed']]
    aux_switch.loc[:,'name'] = AUX_SWITCH_NAME
    # redirect either end of the copied switch to the matching auxiliary bus
    bus_to_be_replaced = aux_switch.loc[aux_switch.bus.isin(bus_to_be_handled.index), 'bus']
    element_to_be_replaced = aux_switch.loc[aux_switch.element.isin(bus_to_be_handled.index), 'element']
    aux_switch.loc[bus_to_be_replaced.index, 'bus'] =\
        bus_aux_mapping[bus_to_be_replaced].values.astype(int)
    aux_switch.loc[element_to_be_replaced.index, 'element'] =\
        bus_aux_mapping[element_to_be_replaced].values.astype(int)
    aux_switch['closed'] = aux_switch['original_closed']
    # NOTE(review): DataFrame.append and Series.iteritems (below) were removed
    # in pandas 2.x — confirm the pinned pandas version supports them
    net.switch = net.switch.append(aux_switch, ignore_index=True)
    # connect each original bus to its auxiliary bus via a small-impedance line
    for bus_ori, bus_aux in bus_aux_mapping.iteritems():
        create_line_from_parameters(net, bus_ori, bus_aux, length_km=1, name=AUX_LINE_NAME,
                                    r_ohm_per_km=0.15, x_ohm_per_km=0.2, c_nf_per_km=0, max_i_ka=1)
def _drop_aux_elements_for_bb_switch(net):
    """
    Remove auxiliary elements (bus, bb switch, line) added by
    _add_aux_elements_for_bb_switch and restore the saved switch status.
    :param net: pandapower net
    :return: None
    """
    # auxiliary switches are identified by their reserved name
    net.switch = net.switch[net.switch.name!=AUX_SWITCH_NAME]
    if 'original_closed' in net.switch.columns:
        net.switch.loc[:, 'closed'] = net.switch.loc[:, 'original_closed']
        net.switch.drop('original_closed', axis=1, inplace=True)
    # drop auxiliary buses/lines from all result tables and the element tables
    for key in net.keys():
        if key.startswith('res_bus'):
            net[key] = net[key].loc[(net.bus.name != AUX_BUS_NAME).values, :]
        if key.startswith('res_line'):
            net[key] = net[key].loc[(net.line.name != AUX_LINE_NAME).values, :]
    net.bus = net.bus.loc[(net.bus.name != AUX_BUS_NAME).values, :]
    net.line = net.line.loc[(net.line.name != AUX_LINE_NAME).values, :]
def _init_ppc(net, v_start, delta_start, calculate_voltage_angles):
    """
    Select in-service elements and convert the pandapower net to the internal
    ppc/ppci pair.
    :param net: pandapower net
    :param v_start: initial voltage magnitude (forwarded as init_vm_pu)
    :param delta_start: initial voltage angle (forwarded as init_va_degree)
    :param calculate_voltage_angles: forwarded to the ppc options
    :return: tuple (ppc, ppci)
    """
    net._options = {}
    _add_ppc_options(net, check_connectivity=False, init_vm_pu=v_start, init_va_degree=delta_start,
                     trafo_model="pi", mode="pf", enforce_q_lims=False,
                     calculate_voltage_angles=calculate_voltage_angles, r_switch=0.0,
                     recycle=dict(_is_elements=False, ppc=False, Ybus=False))
    net["_is_elements"] = _select_is_elements_numba(net)
    _add_auxiliary_elements(net)
    ppc, ppci = _pd2ppc(net)
    # phase shifting transformers present: run a dc power flow to initialize
    # angles, keeping the voltage magnitudes (bus column 7, "vm") untouched
    if np.any(net.trafo.shift_degree):
        vm_backup = ppci["bus"][:, 7].copy()
        ppci["bus"][:, [2, 3]] = 0.  # zero bus injections for the dc pass
        ppci = _run_dc_pf(ppci)
        ppci["bus"][:, 7] = vm_backup
    return ppc, ppci
def _add_measurements_to_ppc(net, ppci, zero_injection):
    """Transfer the measurements from ``net.measurement`` into the ppci.

    Appends ``bus_cols_se`` extra columns (value, std. dev. and pandapower
    measurement index per measured quantity) to ``ppci["bus"]`` and
    ``branch_cols_se`` extra columns to ``ppci["branch"]``.  Slots without a
    measurement stay NaN.  Zero-injection pseudo measurements are added
    according to *zero_injection* (see ``_add_zero_injection``).
    """
    meas = net.measurement.copy(deep=False)
    # Resolve the symbolic side ("from"/"to" for lines, "hv"/"mv"/"lv" for
    # transformers) to the concrete pandapower bus index of that side.
    meas["side"] = meas.apply(lambda row:
                              net['line']["{}_bus".format(row["side"])].loc[row["element"]] if
                              row["side"] in ("from", "to") else
                              net[row["element_type"]][row["side"]+'_bus'].loc[row["element"]] if
                              row["side"] in ("hv", "mv", "lv") else row["side"], axis=1)
    map_bus = net["_pd2ppc_lookups"]["bus"]
    # Bus measurements whose bus was dropped during the pd2ppc conversion
    # (mapped beyond the ppci bus table) cannot be used and are discarded.
    meas_bus = meas[(meas['element_type'] == 'bus')]
    if (map_bus[meas_bus['element'].values.astype(int)] >= ppci["bus"].shape[0]).any():
        std_logger.warning("Measurement defined in pp-grid does not exist in ppci! Will be deleted!")
        meas_bus = meas_bus[map_bus[meas_bus['element'].values.astype(int)] < ppci["bus"].shape[0]]
    # Lookups from pandapower element index to ppci branch row, restricted to
    # in-service branches (branch_mask).  Out-of-service rows are removed from
    # the ppci branch table, hence the offset corrections below.
    map_line, map_trafo, map_trafo3w = None, None, None
    branch_mask = ppci['internal']['branch_is']
    if "line" in net["_pd2ppc_lookups"]["branch"]:
        map_line = {line_ix: br_ix for line_ix, br_ix in
                    zip(net.line.index, range(*net["_pd2ppc_lookups"]["branch"]["line"])) if branch_mask[br_ix]}
    if "trafo" in net["_pd2ppc_lookups"]["branch"]:
        trafo_ix_start, trafo_ix_end = net["_pd2ppc_lookups"]["branch"]["trafo"]
        trafo_ix_offset = np.sum(~branch_mask[:trafo_ix_start])
        trafo_ix_start, trafo_ix_end = trafo_ix_start - trafo_ix_offset, trafo_ix_end - trafo_ix_offset
        map_trafo = {trafo_ix: br_ix for trafo_ix, br_ix in
                     zip(net.trafo.index, range(trafo_ix_start, trafo_ix_end))
                     if branch_mask[br_ix+trafo_ix_offset]}
    if "trafo3w" in net["_pd2ppc_lookups"]["branch"]:
        # Each 3-winding transformer occupies three branch rows (hv, mv, lv),
        # spaced num_trafo3w rows apart.
        trafo3w_ix_start, trafo3w_ix_end = net["_pd2ppc_lookups"]["branch"]["trafo3w"]
        trafo3w_ix_offset = np.sum(~branch_mask[:trafo3w_ix_start])
        num_trafo3w = net.trafo3w.shape[0]
        trafo3w_ix_start, trafo3w_ix_end = trafo3w_ix_start - trafo3w_ix_offset, trafo3w_ix_end - trafo3w_ix_offset
        map_trafo3w = {trafo3w_ix: {'hv': br_ix, 'mv': br_ix+num_trafo3w, 'lv': br_ix+2*num_trafo3w}
                       for trafo3w_ix, br_ix in
                       zip(net.trafo3w.index, range(trafo3w_ix_start, trafo3w_ix_start+num_trafo3w))
                       if branch_mask[br_ix+trafo3w_ix_offset]}
    # Extra bus measurement columns; NaN marks "no measurement" for a slot.
    bus_append = np.full((ppci["bus"].shape[0], bus_cols_se), np.nan, dtype=ppci["bus"].dtype)
    v_measurements = meas_bus[(meas_bus.measurement_type == "v")]
    if len(v_measurements):
        bus_positions = map_bus[v_measurements.element.values.astype(int)]
        bus_append[bus_positions, VM] = v_measurements.value.values
        bus_append[bus_positions, VM_STD] = v_measurements.std_dev.values
        bus_append[bus_positions, VM_IDX] = v_measurements.index.values
    # Multiple P measurements on one bus are merged: values summed, largest
    # std. dev. kept, first measurement index retained.
    p_measurements = meas_bus[(meas_bus.measurement_type == "p")]
    if len(p_measurements):
        bus_positions = map_bus[p_measurements.element.values.astype(int)]
        unique_bus_positions = np.unique(bus_positions)
        if len(unique_bus_positions) < len(bus_positions):
            std_logger.warning("P Measurement duplication will be automatically merged!")
            for bus in unique_bus_positions:
                p_meas_on_bus = p_measurements.iloc[np.argwhere(bus_positions==bus).ravel(), :]
                bus_append[bus, P] = p_meas_on_bus.value.sum()
                bus_append[bus, P_STD] = p_meas_on_bus.std_dev.max()
                bus_append[bus, P_IDX] = p_meas_on_bus.index[0]
        else:
            bus_append[bus_positions, P] = p_measurements.value.values
            bus_append[bus_positions, P_STD] = p_measurements.std_dev.values
            bus_append[bus_positions, P_IDX] = p_measurements.index.values
    # Q measurements on buses: same merging scheme as for P above.
    q_measurements = meas_bus[(meas_bus.measurement_type == "q")]
    if len(q_measurements):
        bus_positions = map_bus[q_measurements.element.values.astype(int)]
        unique_bus_positions = np.unique(bus_positions)
        if len(unique_bus_positions) < len(bus_positions):
            std_logger.warning("Q Measurement duplication will be automatically merged!")
            for bus in unique_bus_positions:
                q_meas_on_bus = q_measurements.iloc[np.argwhere(bus_positions==bus).ravel(), :]
                bus_append[bus, Q] = q_meas_on_bus.value.sum()
                bus_append[bus, Q_STD] = q_meas_on_bus.std_dev.max()
                bus_append[bus, Q_IDX] = q_meas_on_bus.index[0]
        else:
            # NOTE(review): redundant recomputation — bus_positions is
            # unchanged since the assignment above (the P branch omits it).
            bus_positions = map_bus[q_measurements.element.values.astype(int)]
            bus_append[bus_positions, Q] = q_measurements.value.values
            bus_append[bus_positions, Q_STD] = q_measurements.std_dev.values
            bus_append[bus_positions, Q_IDX] = q_measurements.index.values
    bus_append = _add_zero_injection(net, ppci, bus_append, zero_injection)
    # Buses that exist only in the ppci (no pandapower counterpart) get
    # defaults in columns 2-5 — presumably P/P_STD/Q/Q_STD, i.e. zero
    # injection with std. dev. 1; TODO confirm against the SE column constants.
    new_in_line_buses = np.setdiff1d(np.arange(ppci["bus"].shape[0]), map_bus[map_bus >= 0])
    bus_append[new_in_line_buses, 2] = 0.
    bus_append[new_in_line_buses, 3] = 1.
    bus_append[new_in_line_buses, 4] = 0.
    bus_append[new_in_line_buses, 5] = 1.
    # Extra branch measurement columns, analogous to bus_append.
    branch_append = np.full((ppci["branch"].shape[0], branch_cols_se),
                            np.nan, dtype=ppci["branch"].dtype)
    if map_line is not None:
        # Line current measurements; values in kA are scaled by vn_kv * 1e3
        # (presumably converting to the internal per-unit-compatible scale —
        # TODO confirm).  The measured side decides from/to assignment.
        i_measurements = meas[(meas.measurement_type == "i") & (meas.element_type == "line") &\
                              meas.element.isin(map_line)]
        if len(i_measurements):
            meas_from = i_measurements[(i_measurements.side.values.astype(int) ==
                                        net.line.from_bus[i_measurements.element]).values]
            meas_to = i_measurements[(i_measurements.side.values.astype(int) ==
                                      net.line.to_bus[i_measurements.element]).values]
            ix_from = [map_line[l] for l in meas_from.element.values.astype(int)]
            ix_to = [map_line[l] for l in meas_to.element.values.astype(int)]
            i_ka_to_pu_from = (net.bus.vn_kv[meas_from.side]).values * 1e3
            i_ka_to_pu_to = (net.bus.vn_kv[meas_to.side]).values * 1e3
            branch_append[ix_from, IM_FROM] = meas_from.value.values * i_ka_to_pu_from
            branch_append[ix_from, IM_FROM_STD] = meas_from.std_dev.values * i_ka_to_pu_from
            branch_append[ix_from, IM_FROM_IDX] = meas_from.index.values
            branch_append[ix_to, IM_TO] = meas_to.value.values * i_ka_to_pu_to
            branch_append[ix_to, IM_TO_STD] = meas_to.std_dev.values * i_ka_to_pu_to
            branch_append[ix_to, IM_TO_IDX] = meas_to.index.values
        # Line active-power measurements.
        p_measurements = meas[(meas.measurement_type == "p") & (meas.element_type == "line") &
                              meas.element.isin(map_line)]
        if len(p_measurements):
            meas_from = p_measurements[(p_measurements.side.values.astype(int) ==
                                        net.line.from_bus[p_measurements.element]).values]
            meas_to = p_measurements[(p_measurements.side.values.astype(int) ==
                                      net.line.to_bus[p_measurements.element]).values]
            ix_from = [map_line[l] for l in meas_from.element.values.astype(int)]
            ix_to = [map_line[l] for l in meas_to.element.values.astype(int)]
            branch_append[ix_from, P_FROM] = meas_from.value.values
            branch_append[ix_from, P_FROM_STD] = meas_from.std_dev.values
            branch_append[ix_from, P_FROM_IDX] = meas_from.index.values
            branch_append[ix_to, P_TO] = meas_to.value.values
            branch_append[ix_to, P_TO_STD] = meas_to.std_dev.values
            branch_append[ix_to, P_TO_IDX] = meas_to.index.values
        # Line reactive-power measurements.
        q_measurements = meas[(meas.measurement_type == "q") & (meas.element_type == "line") &
                              meas.element.isin(map_line)]
        if len(q_measurements):
            meas_from = q_measurements[(q_measurements.side.values.astype(int) ==
                                        net.line.from_bus[q_measurements.element]).values]
            meas_to = q_measurements[(q_measurements.side.values.astype(int) ==
                                      net.line.to_bus[q_measurements.element]).values]
            ix_from = [map_line[l] for l in meas_from.element.values.astype(int)]
            ix_to = [map_line[l] for l in meas_to.element.values.astype(int)]
            branch_append[ix_from, Q_FROM] = meas_from.value.values
            branch_append[ix_from, Q_FROM_STD] = meas_from.std_dev.values
            branch_append[ix_from, Q_FROM_IDX] = meas_from.index.values
            branch_append[ix_to, Q_TO] = meas_to.value.values
            branch_append[ix_to, Q_TO_STD] = meas_to.std_dev.values
            branch_append[ix_to, Q_TO_IDX] = meas_to.index.values
    if map_trafo is not None:
        # 2-winding transformer measurements: hv side maps to the "from" end
        # of the ppci branch, lv side to the "to" end.
        i_tr_measurements = meas[(meas.measurement_type == "i") & (meas.element_type == "trafo") &
                                 meas.element.isin(map_trafo)]
        if len(i_tr_measurements):
            meas_from = i_tr_measurements[(i_tr_measurements.side.values.astype(int) ==
                                           net.trafo.hv_bus[i_tr_measurements.element]).values]
            meas_to = i_tr_measurements[(i_tr_measurements.side.values.astype(int) ==
                                         net.trafo.lv_bus[i_tr_measurements.element]).values]
            ix_from = [map_trafo[t] for t in meas_from.element.values.astype(int)]
            ix_to = [map_trafo[t] for t in meas_to.element.values.astype(int)]
            i_ka_to_pu_from = (net.bus.vn_kv[meas_from.side]).values * 1e3
            i_ka_to_pu_to = (net.bus.vn_kv[meas_to.side]).values * 1e3
            branch_append[ix_from, IM_FROM] = meas_from.value.values * i_ka_to_pu_from
            branch_append[ix_from, IM_FROM_STD] = meas_from.std_dev.values * i_ka_to_pu_from
            branch_append[ix_from, IM_FROM_IDX] = meas_from.index.values
            branch_append[ix_to, IM_TO] = meas_to.value.values * i_ka_to_pu_to
            branch_append[ix_to, IM_TO_STD] = meas_to.std_dev.values * i_ka_to_pu_to
            branch_append[ix_to, IM_TO_IDX] = meas_to.index.values
        p_tr_measurements = meas[(meas.measurement_type == "p") & (meas.element_type == "trafo") &
                                 meas.element.isin(map_trafo)]
        if len(p_tr_measurements):
            meas_from = p_tr_measurements[(p_tr_measurements.side.values.astype(int) ==
                                           net.trafo.hv_bus[p_tr_measurements.element]).values]
            meas_to = p_tr_measurements[(p_tr_measurements.side.values.astype(int) ==
                                         net.trafo.lv_bus[p_tr_measurements.element]).values]
            ix_from = [map_trafo[t] for t in meas_from.element.values.astype(int)]
            ix_to = [map_trafo[t] for t in meas_to.element.values.astype(int)]
            branch_append[ix_from, P_FROM] = meas_from.value.values
            branch_append[ix_from, P_FROM_STD] = meas_from.std_dev.values
            branch_append[ix_from, P_FROM_IDX] = meas_from.index.values
            branch_append[ix_to, P_TO] = meas_to.value.values
            branch_append[ix_to, P_TO_STD] = meas_to.std_dev.values
            branch_append[ix_to, P_TO_IDX] = meas_to.index.values
        q_tr_measurements = meas[(meas.measurement_type == "q") & (meas.element_type == "trafo") &
                                 meas.element.isin(map_trafo)]
        if len(q_tr_measurements):
            meas_from = q_tr_measurements[(q_tr_measurements.side.values.astype(int) ==
                                           net.trafo.hv_bus[q_tr_measurements.element]).values]
            meas_to = q_tr_measurements[(q_tr_measurements.side.values.astype(int) ==
                                         net.trafo.lv_bus[q_tr_measurements.element]).values]
            ix_from = [map_trafo[t] for t in meas_from.element.values.astype(int)]
            ix_to = [map_trafo[t] for t in meas_to.element.values.astype(int)]
            branch_append[ix_from, Q_FROM] = meas_from.value.values
            branch_append[ix_from, Q_FROM_STD] = meas_from.std_dev.values
            branch_append[ix_from, Q_FROM_IDX] = meas_from.index.values
            branch_append[ix_to, Q_TO] = meas_to.value.values
            branch_append[ix_to, Q_TO_STD] = meas_to.std_dev.values
            branch_append[ix_to, Q_TO_IDX] = meas_to.index.values
    if map_trafo3w is not None:
        # 3-winding transformer measurements: the hv winding maps to the
        # "from" end of its branch; mv and lv windings map to the "to" end of
        # their respective branches.
        i_tr3w_measurements = meas[(meas.measurement_type == "i") & (meas.element_type == "trafo3w") &
                                   meas.element.isin(map_trafo3w)]
        if len(i_tr3w_measurements):
            meas_hv = i_tr3w_measurements[(i_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.hv_bus[i_tr3w_measurements.element]).values]
            meas_mv = i_tr3w_measurements[(i_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.mv_bus[i_tr3w_measurements.element]).values]
            meas_lv = i_tr3w_measurements[(i_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.lv_bus[i_tr3w_measurements.element]).values]
            ix_hv = [map_trafo3w[t]['hv'] for t in meas_hv.element.values.astype(int)]
            ix_mv = [map_trafo3w[t]['mv'] for t in meas_mv.element.values.astype(int)]
            ix_lv = [map_trafo3w[t]['lv'] for t in meas_lv.element.values.astype(int)]
            # NOTE(review): unlike the line/trafo sections above, these scale
            # factors omit the 1e3 multiplier — confirm whether intentional.
            i_ka_to_pu_hv = (net.bus.vn_kv[meas_hv.side]).values
            i_ka_to_pu_mv = (net.bus.vn_kv[meas_mv.side]).values
            i_ka_to_pu_lv = (net.bus.vn_kv[meas_lv.side]).values
            branch_append[ix_hv, IM_FROM] = meas_hv.value.values * i_ka_to_pu_hv
            branch_append[ix_hv, IM_FROM_STD] = meas_hv.std_dev.values * i_ka_to_pu_hv
            branch_append[ix_hv, IM_FROM_IDX] = meas_hv.index.values
            branch_append[ix_mv, IM_TO] = meas_mv.value.values * i_ka_to_pu_mv
            branch_append[ix_mv, IM_TO_STD] = meas_mv.std_dev.values * i_ka_to_pu_mv
            branch_append[ix_mv, IM_TO_IDX] = meas_mv.index.values
            branch_append[ix_lv, IM_TO] = meas_lv.value.values * i_ka_to_pu_lv
            branch_append[ix_lv, IM_TO_STD] = meas_lv.std_dev.values * i_ka_to_pu_lv
            branch_append[ix_lv, IM_TO_IDX] = meas_lv.index.values
        p_tr3w_measurements = meas[(meas.measurement_type == "p") & (meas.element_type == "trafo3w") &
                                   meas.element.isin(map_trafo3w)]
        if len(p_tr3w_measurements):
            meas_hv = p_tr3w_measurements[(p_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.hv_bus[p_tr3w_measurements.element]).values]
            meas_mv = p_tr3w_measurements[(p_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.mv_bus[p_tr3w_measurements.element]).values]
            meas_lv = p_tr3w_measurements[(p_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.lv_bus[p_tr3w_measurements.element]).values]
            ix_hv = [map_trafo3w[t]['hv'] for t in meas_hv.element.values.astype(int)]
            ix_mv = [map_trafo3w[t]['mv'] for t in meas_mv.element.values.astype(int)]
            ix_lv = [map_trafo3w[t]['lv'] for t in meas_lv.element.values.astype(int)]
            branch_append[ix_hv, P_FROM] = meas_hv.value.values
            branch_append[ix_hv, P_FROM_STD] = meas_hv.std_dev.values
            branch_append[ix_hv, P_FROM_IDX] = meas_hv.index.values
            branch_append[ix_mv, P_TO] = meas_mv.value.values
            branch_append[ix_mv, P_TO_STD] = meas_mv.std_dev.values
            branch_append[ix_mv, P_TO_IDX] = meas_mv.index.values
            branch_append[ix_lv, P_TO] = meas_lv.value.values
            branch_append[ix_lv, P_TO_STD] = meas_lv.std_dev.values
            branch_append[ix_lv, P_TO_IDX] = meas_lv.index.values
        q_tr3w_measurements = meas[(meas.measurement_type == "q") & (meas.element_type == "trafo3w") &
                                   meas.element.isin(map_trafo3w)]
        if len(q_tr3w_measurements):
            meas_hv = q_tr3w_measurements[(q_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.hv_bus[q_tr3w_measurements.element]).values]
            meas_mv = q_tr3w_measurements[(q_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.mv_bus[q_tr3w_measurements.element]).values]
            meas_lv = q_tr3w_measurements[(q_tr3w_measurements.side.values.astype(int) ==
                                           net.trafo3w.lv_bus[q_tr3w_measurements.element]).values]
            ix_hv = [map_trafo3w[t]['hv'] for t in meas_hv.element.values.astype(int)]
            ix_mv = [map_trafo3w[t]['mv'] for t in meas_mv.element.values.astype(int)]
            ix_lv = [map_trafo3w[t]['lv'] for t in meas_lv.element.values.astype(int)]
            branch_append[ix_hv, Q_FROM] = meas_hv.value.values
            branch_append[ix_hv, Q_FROM_STD] = meas_hv.std_dev.values
            branch_append[ix_hv, Q_FROM_IDX] = meas_hv.index.values
            branch_append[ix_mv, Q_TO] = meas_mv.value.values
            branch_append[ix_mv, Q_TO_STD] = meas_mv.std_dev.values
            branch_append[ix_mv, Q_TO_IDX] = meas_mv.index.values
            branch_append[ix_lv, Q_TO] = meas_lv.value.values
            branch_append[ix_lv, Q_TO_STD] = meas_lv.std_dev.values
            branch_append[ix_lv, Q_TO_IDX] = meas_lv.index.values
    # Attach the measurement columns to the standard ppci matrices.
    ppci["bus"] = np.hstack((ppci["bus"], bus_append))
    ppci["branch"] = np.hstack((ppci["branch"], branch_append))
    return ppci
def _add_zero_injection(net, ppci, bus_append, zero_injection):
    """Flag zero-injection buses in *bus_append* and give each of them
    virtual P/Q measurements of value 0 with std. dev. 1.

    *zero_injection* may be 'auto' (detect unloaded buses automatically),
    'aux_bus' (only auxiliary buses), an iterable of pandapower bus indices,
    or None (flag nothing).
    """
    bus_append[:, ZERO_INJ_FLAG] = False
    if zero_injection is not None:
        # Auxiliary buses created during pd2ppc conversion (except xward)
        # are always flagged.
        aux_lookup = net._pd2ppc_lookups['aux']
        if aux_lookup:
            aux_pp_buses = np.concatenate(
                [buses for element, buses in aux_lookup.items() if element != 'xward'])
            bus_append[net._pd2ppc_lookups['bus'][aux_pp_buses], ZERO_INJ_FLAG] = True
        if isinstance(zero_injection, str):
            if zero_injection == 'auto':
                # Buses of type 1 (PQ, per pypower convention) with all-zero
                # entries in columns 2-5 (presumably load/shunt data) and no
                # existing P/Q measurement qualify automatically.
                is_type_pq = ppci["bus"][:, 1] == 1
                no_injection = (ppci["bus"][:, 2:6] == 0).all(axis=1)
                unmeasured = np.isnan(bus_append[:, P:(Q_STD+1)]).all(axis=1)
                bus_append[is_type_pq & no_injection & unmeasured, ZERO_INJ_FLAG] = True
            elif zero_injection != "aux_bus":
                raise UserWarning("zero injection parameter is not correctly initialized")
        elif hasattr(zero_injection, '__iter__'):
            bus_append[net._pd2ppc_lookups['bus'][zero_injection], ZERO_INJ_FLAG] = True

    # Every flagged bus receives exact-zero P/Q pseudo measurements.
    flagged = np.argwhere(bus_append[:, ZERO_INJ_FLAG]).ravel()
    bus_append[flagged, P] = 0
    bus_append[flagged, P_STD] = 1
    bus_append[flagged, Q] = 0
    bus_append[flagged, Q_STD] = 1
    return bus_append
def _build_measurement_vectors(ppci):
    """Collect all measurement values, their pandapower indices and their
    standard deviations from the ppci bus/branch matrices into flat vectors.

    Returns ``(z, pp_meas_indices, r_cov)``, each ordered as: P bus, P from,
    P to, Q bus, Q from, Q to, V bus, I from, I to.
    """
    # (matrix key, value column, index column, std-dev column) for every
    # measurement type, in the order the estimator expects.
    layout = (
        ("bus", bus_cols + P, bus_cols + P_IDX, bus_cols + P_STD),
        ("branch", branch_cols + P_FROM, branch_cols + P_FROM_IDX, branch_cols + P_FROM_STD),
        ("branch", branch_cols + P_TO, branch_cols + P_TO_IDX, branch_cols + P_TO_STD),
        ("bus", bus_cols + Q, bus_cols + Q_IDX, bus_cols + Q_STD),
        ("branch", branch_cols + Q_FROM, branch_cols + Q_FROM_IDX, branch_cols + Q_FROM_STD),
        ("branch", branch_cols + Q_TO, branch_cols + Q_TO_IDX, branch_cols + Q_TO_STD),
        ("bus", bus_cols + VM, bus_cols + VM_IDX, bus_cols + VM_STD),
        ("branch", branch_cols + IM_FROM, branch_cols + IM_FROM_IDX, branch_cols + IM_FROM_STD),
        ("branch", branch_cols + IM_TO, branch_cols + IM_TO_IDX, branch_cols + IM_TO_STD),
    )
    values, indices, stds = [], [], []
    for matrix, val_col, idx_col, std_col in layout:
        # A non-NaN value column entry marks an existing measurement; the
        # same rows supply index and std-dev.
        rows = ~np.isnan(ppci[matrix][:, val_col])
        values.append(ppci[matrix][rows, val_col])
        indices.append(ppci[matrix][rows, idx_col])
        stds.append(ppci[matrix][rows, std_col])
    z = np.concatenate(values).real.astype(np.float64)
    pp_meas_indices = np.concatenate(indices).real.astype(int)
    r_cov = np.concatenate(stds).real.astype(np.float64)
    return z, pp_meas_indices, r_cov
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.