hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73aefa79b47e1039b9702029b3af5cd7718d5a6 | 418 | py | Python | blogproject/settings/production.py | showd0wn/blogproject | 08f250bc31499402a30f68a04485de6ca499c069 | [
"MIT"
] | null | null | null | blogproject/settings/production.py | showd0wn/blogproject | 08f250bc31499402a30f68a04485de6ca499c069 | [
"MIT"
] | null | null | null | blogproject/settings/production.py | showd0wn/blogproject | 08f250bc31499402a30f68a04485de6ca499c069 | [
"MIT"
] | null | null | null | # flake8: noqa
import os
from .common import *
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['www.qingzhiyu.com']
| 24.588235 | 71 | 0.76555 |
import os
from .common import *
SECRET_KEY = os.environ['DJANGO_SECRET_KEY']
DEBUG = False
ALLOWED_HOSTS = ['www.qingzhiyu.com']
| true | true |
f73af00cc224bc740577c55aad456233af32e58c | 1,649 | py | Python | ipfshttpclient4ipwb/client/bitswap.py | ibnesayeed/py-ipfs-http-client | c3bce1f28d09b26530a6fe0e8f1b3a8a023bc23b | [
"MIT"
] | null | null | null | ipfshttpclient4ipwb/client/bitswap.py | ibnesayeed/py-ipfs-http-client | c3bce1f28d09b26530a6fe0e8f1b3a8a023bc23b | [
"MIT"
] | null | null | null | ipfshttpclient4ipwb/client/bitswap.py | ibnesayeed/py-ipfs-http-client | c3bce1f28d09b26530a6fe0e8f1b3a8a023bc23b | [
"MIT"
] | null | null | null | from . import base
class Section(base.SectionBase):
@base.returns_single_item
def wantlist(self, peer=None, **kwargs):
"""Returns blocks currently on the bitswap wantlist.
.. code-block:: python
>>> client.bitswap.wantlist()
{'Keys': [
'QmeV6C6XVt1wf7V7as7Yak3mxPma8jzpqyhtRtCvpKcfBb',
'QmdCWFLDXqgdWQY9kVubbEHBbkieKd3uo7MtCm7nTZZE9K',
'QmVQ1XvYGF19X4eJqz1s7FJYJqAxFC4oqh3vWJJEXn66cp'
]}
Parameters
----------
peer : str
Peer to show wantlist for.
Returns
-------
dict
+------+----------------------------------------------------+
| Keys | List of blocks the connected daemon is looking for |
+------+----------------------------------------------------+
"""
args = (peer,)
return self._client.request('/bitswap/wantlist', args, decoder='json', **kwargs)
@base.returns_single_item
def stat(self, **kwargs):
"""Returns some diagnostic information from the bitswap agent.
.. code-block:: python
>>> client.bitswap.stat()
{'BlocksReceived': 96,
'DupBlksReceived': 73,
'DupDataReceived': 2560601,
'ProviderBufLen': 0,
'Peers': [
'QmNZFQRxt9RMNm2VVtuV2Qx7q69bcMWRVXmr5CEkJEgJJP',
'QmNfCubGpwYZAQxX8LQDsYgB48C4GbfZHuYdexpX9mbNyT',
'QmNfnZ8SCs3jAtNPc8kf3WJqJqSoX7wsX7VqkLdEYMao4u',
…
],
'Wantlist': [
'QmeV6C6XVt1wf7V7as7Yak3mxPma8jzpqyhtRtCvpKcfBb',
'QmdCWFLDXqgdWQY9kVubbEHBbkieKd3uo7MtCm7nTZZE9K',
'QmVQ1XvYGF19X4eJqz1s7FJYJqAxFC4oqh3vWJJEXn66cp'
]
}
Returns
-------
dict
Statistics, peers and wanted blocks
"""
return self._client.request('/bitswap/stat', decoder='json', **kwargs) | 25.765625 | 82 | 0.637356 | from . import base
class Section(base.SectionBase):
@base.returns_single_item
def wantlist(self, peer=None, **kwargs):
args = (peer,)
return self._client.request('/bitswap/wantlist', args, decoder='json', **kwargs)
@base.returns_single_item
def stat(self, **kwargs):
return self._client.request('/bitswap/stat', decoder='json', **kwargs) | true | true |
f73af0947b89157946ecb08b611d2c0e698545af | 5,888 | py | Python | exercises/practice/clock/clock_test.py | gsilvapt/python | d675468b2437d4c09c358d023ef998a05a781f58 | [
"MIT"
] | 1,177 | 2017-06-21T20:24:06.000Z | 2022-03-29T02:30:55.000Z | exercises/practice/clock/clock_test.py | gsilvapt/python | d675468b2437d4c09c358d023ef998a05a781f58 | [
"MIT"
] | 1,890 | 2017-06-18T20:06:10.000Z | 2022-03-31T18:35:51.000Z | exercises/practice/clock/clock_test.py | gsilvapt/python | d675468b2437d4c09c358d023ef998a05a781f58 | [
"MIT"
] | 1,095 | 2017-06-26T23:06:19.000Z | 2022-03-29T03:25:38.000Z | import unittest
from clock import (
Clock,
)
# Tests adapted from `problem-specifications//canonical-data.json`
class ClockTest(unittest.TestCase):
# Create A New Clock With An Initial Time
def test_on_the_hour(self):
self.assertEqual(str(Clock(8, 0)), "08:00")
def test_past_the_hour(self):
self.assertEqual(str(Clock(11, 9)), "11:09")
def test_midnight_is_zero_hours(self):
self.assertEqual(str(Clock(24, 0)), "00:00")
def test_hour_rolls_over(self):
self.assertEqual(str(Clock(25, 0)), "01:00")
def test_hour_rolls_over_continuously(self):
self.assertEqual(str(Clock(100, 0)), "04:00")
def test_sixty_minutes_is_next_hour(self):
self.assertEqual(str(Clock(1, 60)), "02:00")
def test_minutes_roll_over(self):
self.assertEqual(str(Clock(0, 160)), "02:40")
def test_minutes_roll_over_continuously(self):
self.assertEqual(str(Clock(0, 1723)), "04:43")
def test_hour_and_minutes_roll_over(self):
self.assertEqual(str(Clock(25, 160)), "03:40")
def test_hour_and_minutes_roll_over_continuously(self):
self.assertEqual(str(Clock(201, 3001)), "11:01")
def test_hour_and_minutes_roll_over_to_exactly_midnight(self):
self.assertEqual(str(Clock(72, 8640)), "00:00")
def test_negative_hour(self):
self.assertEqual(str(Clock(-1, 15)), "23:15")
def test_negative_hour_rolls_over(self):
self.assertEqual(str(Clock(-25, 0)), "23:00")
def test_negative_hour_rolls_over_continuously(self):
self.assertEqual(str(Clock(-91, 0)), "05:00")
def test_negative_minutes(self):
self.assertEqual(str(Clock(1, -40)), "00:20")
def test_negative_minutes_roll_over(self):
self.assertEqual(str(Clock(1, -160)), "22:20")
def test_negative_minutes_roll_over_continuously(self):
self.assertEqual(str(Clock(1, -4820)), "16:40")
def test_negative_sixty_minutes_is_previous_hour(self):
self.assertEqual(str(Clock(2, -60)), "01:00")
def test_negative_hour_and_minutes_both_roll_over(self):
self.assertEqual(str(Clock(-25, -160)), "20:20")
def test_negative_hour_and_minutes_both_roll_over_continuously(self):
self.assertEqual(str(Clock(-121, -5810)), "22:10")
# Add Minutes
def test_add_minutes(self):
self.assertEqual(str(Clock(10, 0) + 3), "10:03")
def test_add_no_minutes(self):
self.assertEqual(str(Clock(6, 41) + 0), "06:41")
def test_add_to_next_hour(self):
self.assertEqual(str(Clock(0, 45) + 40), "01:25")
def test_add_more_than_one_hour(self):
self.assertEqual(str(Clock(10, 0) + 61), "11:01")
def test_add_more_than_two_hours_with_carry(self):
self.assertEqual(str(Clock(0, 45) + 160), "03:25")
def test_add_across_midnight(self):
self.assertEqual(str(Clock(23, 59) + 2), "00:01")
def test_add_more_than_one_day_1500_min_25_hrs(self):
self.assertEqual(str(Clock(5, 32) + 1500), "06:32")
def test_add_more_than_two_days(self):
self.assertEqual(str(Clock(1, 1) + 3500), "11:21")
# Subtract Minutes
def test_subtract_minutes(self):
self.assertEqual(str(Clock(10, 3) - 3), "10:00")
def test_subtract_to_previous_hour(self):
self.assertEqual(str(Clock(10, 3) - 30), "09:33")
def test_subtract_more_than_an_hour(self):
self.assertEqual(str(Clock(10, 3) - 70), "08:53")
def test_subtract_across_midnight(self):
self.assertEqual(str(Clock(0, 3) - 4), "23:59")
def test_subtract_more_than_two_hours(self):
self.assertEqual(str(Clock(0, 0) - 160), "21:20")
def test_subtract_more_than_two_hours_with_borrow(self):
self.assertEqual(str(Clock(6, 15) - 160), "03:35")
def test_subtract_more_than_one_day_1500_min_25_hrs(self):
self.assertEqual(str(Clock(5, 32) - 1500), "04:32")
def test_subtract_more_than_two_days(self):
self.assertEqual(str(Clock(2, 20) - 3000), "00:20")
# Compare Two Clocks For Equality
def test_clocks_with_same_time(self):
self.assertEqual(Clock(15, 37), Clock(15, 37))
def test_clocks_a_minute_apart(self):
self.assertNotEqual(Clock(15, 36), Clock(15, 37))
def test_clocks_an_hour_apart(self):
self.assertNotEqual(Clock(14, 37), Clock(15, 37))
def test_clocks_with_hour_overflow(self):
self.assertEqual(Clock(10, 37), Clock(34, 37))
def test_clocks_with_hour_overflow_by_several_days(self):
self.assertEqual(Clock(3, 11), Clock(99, 11))
def test_clocks_with_negative_hour(self):
self.assertEqual(Clock(22, 40), Clock(-2, 40))
def test_clocks_with_negative_hour_that_wraps(self):
self.assertEqual(Clock(17, 3), Clock(-31, 3))
def test_clocks_with_negative_hour_that_wraps_multiple_times(self):
self.assertEqual(Clock(13, 49), Clock(-83, 49))
def test_clocks_with_minute_overflow(self):
self.assertEqual(Clock(0, 1), Clock(0, 1441))
def test_clocks_with_minute_overflow_by_several_days(self):
self.assertEqual(Clock(2, 2), Clock(2, 4322))
def test_clocks_with_negative_minute(self):
self.assertEqual(Clock(2, 40), Clock(3, -20))
def test_clocks_with_negative_minute_that_wraps(self):
self.assertEqual(Clock(4, 10), Clock(5, -1490))
def test_clocks_with_negative_minute_that_wraps_multiple_times(self):
self.assertEqual(Clock(6, 15), Clock(6, -4305))
def test_clocks_with_negative_hours_and_minutes(self):
self.assertEqual(Clock(7, 32), Clock(-12, -268))
def test_clocks_with_negative_hours_and_minutes_that_wrap(self):
self.assertEqual(Clock(18, 7), Clock(-54, -11513))
def test_full_clock_and_zeroed_clock(self):
self.assertEqual(Clock(24, 0), Clock(0, 0))
if __name__ == "__main__":
unittest.main()
| 33.83908 | 73 | 0.683084 | import unittest
from clock import (
Clock,
)
class ClockTest(unittest.TestCase):
def test_on_the_hour(self):
self.assertEqual(str(Clock(8, 0)), "08:00")
def test_past_the_hour(self):
self.assertEqual(str(Clock(11, 9)), "11:09")
def test_midnight_is_zero_hours(self):
self.assertEqual(str(Clock(24, 0)), "00:00")
def test_hour_rolls_over(self):
self.assertEqual(str(Clock(25, 0)), "01:00")
def test_hour_rolls_over_continuously(self):
self.assertEqual(str(Clock(100, 0)), "04:00")
def test_sixty_minutes_is_next_hour(self):
self.assertEqual(str(Clock(1, 60)), "02:00")
def test_minutes_roll_over(self):
self.assertEqual(str(Clock(0, 160)), "02:40")
def test_minutes_roll_over_continuously(self):
self.assertEqual(str(Clock(0, 1723)), "04:43")
def test_hour_and_minutes_roll_over(self):
self.assertEqual(str(Clock(25, 160)), "03:40")
def test_hour_and_minutes_roll_over_continuously(self):
self.assertEqual(str(Clock(201, 3001)), "11:01")
def test_hour_and_minutes_roll_over_to_exactly_midnight(self):
self.assertEqual(str(Clock(72, 8640)), "00:00")
def test_negative_hour(self):
self.assertEqual(str(Clock(-1, 15)), "23:15")
def test_negative_hour_rolls_over(self):
self.assertEqual(str(Clock(-25, 0)), "23:00")
def test_negative_hour_rolls_over_continuously(self):
self.assertEqual(str(Clock(-91, 0)), "05:00")
def test_negative_minutes(self):
self.assertEqual(str(Clock(1, -40)), "00:20")
def test_negative_minutes_roll_over(self):
self.assertEqual(str(Clock(1, -160)), "22:20")
def test_negative_minutes_roll_over_continuously(self):
self.assertEqual(str(Clock(1, -4820)), "16:40")
def test_negative_sixty_minutes_is_previous_hour(self):
self.assertEqual(str(Clock(2, -60)), "01:00")
def test_negative_hour_and_minutes_both_roll_over(self):
self.assertEqual(str(Clock(-25, -160)), "20:20")
def test_negative_hour_and_minutes_both_roll_over_continuously(self):
self.assertEqual(str(Clock(-121, -5810)), "22:10")
def test_add_minutes(self):
self.assertEqual(str(Clock(10, 0) + 3), "10:03")
def test_add_no_minutes(self):
self.assertEqual(str(Clock(6, 41) + 0), "06:41")
def test_add_to_next_hour(self):
self.assertEqual(str(Clock(0, 45) + 40), "01:25")
def test_add_more_than_one_hour(self):
self.assertEqual(str(Clock(10, 0) + 61), "11:01")
def test_add_more_than_two_hours_with_carry(self):
self.assertEqual(str(Clock(0, 45) + 160), "03:25")
def test_add_across_midnight(self):
self.assertEqual(str(Clock(23, 59) + 2), "00:01")
def test_add_more_than_one_day_1500_min_25_hrs(self):
self.assertEqual(str(Clock(5, 32) + 1500), "06:32")
def test_add_more_than_two_days(self):
self.assertEqual(str(Clock(1, 1) + 3500), "11:21")
def test_subtract_minutes(self):
self.assertEqual(str(Clock(10, 3) - 3), "10:00")
def test_subtract_to_previous_hour(self):
self.assertEqual(str(Clock(10, 3) - 30), "09:33")
def test_subtract_more_than_an_hour(self):
self.assertEqual(str(Clock(10, 3) - 70), "08:53")
def test_subtract_across_midnight(self):
self.assertEqual(str(Clock(0, 3) - 4), "23:59")
def test_subtract_more_than_two_hours(self):
self.assertEqual(str(Clock(0, 0) - 160), "21:20")
def test_subtract_more_than_two_hours_with_borrow(self):
self.assertEqual(str(Clock(6, 15) - 160), "03:35")
def test_subtract_more_than_one_day_1500_min_25_hrs(self):
self.assertEqual(str(Clock(5, 32) - 1500), "04:32")
def test_subtract_more_than_two_days(self):
self.assertEqual(str(Clock(2, 20) - 3000), "00:20")
def test_clocks_with_same_time(self):
self.assertEqual(Clock(15, 37), Clock(15, 37))
def test_clocks_a_minute_apart(self):
self.assertNotEqual(Clock(15, 36), Clock(15, 37))
def test_clocks_an_hour_apart(self):
self.assertNotEqual(Clock(14, 37), Clock(15, 37))
def test_clocks_with_hour_overflow(self):
self.assertEqual(Clock(10, 37), Clock(34, 37))
def test_clocks_with_hour_overflow_by_several_days(self):
self.assertEqual(Clock(3, 11), Clock(99, 11))
def test_clocks_with_negative_hour(self):
self.assertEqual(Clock(22, 40), Clock(-2, 40))
def test_clocks_with_negative_hour_that_wraps(self):
self.assertEqual(Clock(17, 3), Clock(-31, 3))
def test_clocks_with_negative_hour_that_wraps_multiple_times(self):
self.assertEqual(Clock(13, 49), Clock(-83, 49))
def test_clocks_with_minute_overflow(self):
self.assertEqual(Clock(0, 1), Clock(0, 1441))
def test_clocks_with_minute_overflow_by_several_days(self):
self.assertEqual(Clock(2, 2), Clock(2, 4322))
def test_clocks_with_negative_minute(self):
self.assertEqual(Clock(2, 40), Clock(3, -20))
def test_clocks_with_negative_minute_that_wraps(self):
self.assertEqual(Clock(4, 10), Clock(5, -1490))
def test_clocks_with_negative_minute_that_wraps_multiple_times(self):
self.assertEqual(Clock(6, 15), Clock(6, -4305))
def test_clocks_with_negative_hours_and_minutes(self):
self.assertEqual(Clock(7, 32), Clock(-12, -268))
def test_clocks_with_negative_hours_and_minutes_that_wrap(self):
self.assertEqual(Clock(18, 7), Clock(-54, -11513))
def test_full_clock_and_zeroed_clock(self):
self.assertEqual(Clock(24, 0), Clock(0, 0))
if __name__ == "__main__":
unittest.main()
| true | true |
f73af24a942520c19bbff8b58970e2629928f30a | 14,961 | py | Python | samples/client/petstore/python-experimental/tests/test_deserialization.py | joschi/openapi-generator | d949c8181dbaad2b31880093bf41f11da8853843 | [
"Apache-2.0"
] | 1 | 2020-08-07T08:38:39.000Z | 2020-08-07T08:38:39.000Z | samples/client/petstore/python-experimental/tests/test_deserialization.py | joschi/openapi-generator | d949c8181dbaad2b31880093bf41f11da8853843 | [
"Apache-2.0"
] | 1 | 2020-07-08T09:59:39.000Z | 2020-09-07T14:45:18.000Z | samples/client/petstore/python-experimental/tests/test_deserialization.py | joschi/openapi-generator | d949c8181dbaad2b31880093bf41f11da8853843 | [
"Apache-2.0"
] | 1 | 2019-10-06T12:57:47.000Z | 2019-10-06T12:57:47.000Z | # coding: utf-8
# flake8: noqa
"""
Run the tests.
$ pip install nose (optional)
$ cd OpenAPIPetstore-python
$ nosetests -v
"""
from collections import namedtuple
import json
import os
import time
import unittest
import datetime
import six
import petstore_api
from petstore_api.exceptions import (
ApiTypeError,
ApiKeyError,
ApiValueError,
)
from petstore_api.model import (
enum_test,
pet,
animal,
dog,
parent_pet,
child_lizard,
category,
outer_enum,
outer_number,
string_boolean_map,
)
from petstore_api.model_utils import (
file_type,
int,
model_to_dict,
str,
)
from petstore_api.rest import RESTResponse
MockResponse = namedtuple('MockResponse', 'data')
class DeserializationTests(unittest.TestCase):
def setUp(self):
self.api_client = petstore_api.ApiClient()
self.deserialize = self.api_client.deserialize
def test_enum_test(self):
""" deserialize dict(str, Enum_Test) """
data = {
'enum_test': {
"enum_string": "UPPER",
"enum_string_required": "lower",
"enum_integer": 1,
"enum_number": 1.1,
"outerEnum": "placed"
}
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response,
({str: (enum_test.EnumTest,)},), True)
self.assertTrue(isinstance(deserialized, dict))
self.assertTrue(
isinstance(deserialized['enum_test'], enum_test.EnumTest))
value = (
outer_enum.OuterEnum.allowed_values[('value',)]["PLACED"])
outer_enum_val = outer_enum.OuterEnum(value)
sample_instance = enum_test.EnumTest(
enum_string="UPPER",
enum_string_required="lower",
enum_integer=1,
enum_number=1.1,
outer_enum=outer_enum_val
)
self.assertEqual(deserialized['enum_test'], sample_instance)
def test_deserialize_dict_str_pet(self):
""" deserialize dict(str, Pet) """
data = {
'pet': {
"id": 0,
"category": {
"id": 0,
"name": "string"
},
"name": "doggie",
"photoUrls": [
"string"
],
"tags": [
{
"id": 0,
"fullName": "string"
}
],
"status": "available"
}
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response,
({str: (pet.Pet,)},), True)
self.assertTrue(isinstance(deserialized, dict))
self.assertTrue(isinstance(deserialized['pet'], pet.Pet))
def test_deserialize_dict_str_dog(self):
""" deserialize dict(str, Dog), use discriminator"""
data = {
'dog': {
"className": "Dog",
"color": "white",
"breed": "Jack Russel Terrier"
}
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response,
({str: (animal.Animal,)},), True)
self.assertTrue(isinstance(deserialized, dict))
dog_inst = deserialized['dog']
self.assertTrue(isinstance(dog_inst, dog.Dog))
self.assertEqual(dog_inst.class_name, "Dog")
self.assertEqual(dog_inst.color, "white")
self.assertEqual(dog_inst.breed, "Jack Russel Terrier")
def test_deserialize_lizard(self):
""" deserialize ChildLizard, use discriminator"""
data = {
"pet_type": "ChildLizard",
"lovesRocks": True
}
response = MockResponse(data=json.dumps(data))
lizard = self.deserialize(response,
(parent_pet.ParentPet,), True)
self.assertTrue(isinstance(lizard, child_lizard.ChildLizard))
self.assertEqual(lizard.pet_type, "ChildLizard")
self.assertEqual(lizard.loves_rocks, True)
def test_deserialize_dict_str_int(self):
""" deserialize dict(str, int) """
data = {
'integer': 1
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, ({str: (int,)},), True)
self.assertTrue(isinstance(deserialized, dict))
self.assertTrue(isinstance(deserialized['integer'], int))
def test_deserialize_str(self):
""" deserialize str """
data = "test str"
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, (str,), True)
self.assertTrue(isinstance(deserialized, str))
def test_deserialize_date(self):
""" deserialize date """
data = "1997-07-16"
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, (datetime.date,), True)
self.assertTrue(isinstance(deserialized, datetime.date))
def test_deserialize_datetime(self):
""" deserialize datetime """
data = "1997-07-16T19:20:30.45+01:00"
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, (datetime.datetime,), True)
self.assertTrue(isinstance(deserialized, datetime.datetime))
def test_deserialize_pet(self):
""" deserialize pet """
data = {
"id": 0,
"category": {
"id": 0,
"name": "string"
},
"name": "doggie",
"photoUrls": [
"string"
],
"tags": [
{
"id": 0,
"fullName": "string"
}
],
"status": "available"
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, (pet.Pet,), True)
self.assertTrue(isinstance(deserialized, pet.Pet))
self.assertEqual(deserialized.id, 0)
self.assertEqual(deserialized.name, "doggie")
self.assertTrue(isinstance(deserialized.category, category.Category))
self.assertEqual(deserialized.category.name, "string")
self.assertTrue(isinstance(deserialized.tags, list))
self.assertEqual(deserialized.tags[0].full_name, "string")
def test_deserialize_list_of_pet(self):
""" deserialize list[Pet] """
data = [
{
"id": 0,
"category": {
"id": 0,
"name": "string"
},
"name": "doggie0",
"photoUrls": [
"string"
],
"tags": [
{
"id": 0,
"fullName": "string"
}
],
"status": "available"
},
{
"id": 1,
"category": {
"id": 0,
"name": "string"
},
"name": "doggie1",
"photoUrls": [
"string"
],
"tags": [
{
"id": 0,
"fullName": "string"
}
],
"status": "available"
}]
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response,
([pet.Pet],), True)
self.assertTrue(isinstance(deserialized, list))
self.assertTrue(isinstance(deserialized[0], pet.Pet))
self.assertEqual(deserialized[0].id, 0)
self.assertEqual(deserialized[1].id, 1)
self.assertEqual(deserialized[0].name, "doggie0")
self.assertEqual(deserialized[1].name, "doggie1")
def test_deserialize_nested_dict(self):
""" deserialize dict(str, dict(str, int)) """
data = {
"foo": {
"bar": 1
}
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response,
({str: ({str: (int,)},)},), True)
self.assertTrue(isinstance(deserialized, dict))
self.assertTrue(isinstance(deserialized["foo"], dict))
self.assertTrue(isinstance(deserialized["foo"]["bar"], int))
def test_deserialize_nested_list(self):
""" deserialize list[list[str]] """
data = [["foo"]]
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, ([[str]],), True)
self.assertTrue(isinstance(deserialized, list))
self.assertTrue(isinstance(deserialized[0], list))
self.assertTrue(isinstance(deserialized[0][0], str))
def test_deserialize_none(self):
""" deserialize None """
response = MockResponse(data=json.dumps(None))
error_msg = (
"Invalid type for variable 'received_data'. Required value type is "
"datetime and passed type was NoneType at ['received_data']"
)
with self.assertRaises(ApiTypeError) as exc:
deserialized = self.deserialize(response, (datetime.datetime,), True)
self.assertEqual(str(exc.exception), error_msg)
def test_deserialize_OuterEnum(self):
""" deserialize OuterEnum """
# make sure that an exception is thrown on an invalid value
with self.assertRaises(ApiValueError):
self.deserialize(
MockResponse(data=json.dumps("test str")),
(outer_enum.OuterEnum,),
True
)
# valid value works
placed_str = (
outer_enum.OuterEnum.allowed_values[('value',)]["PLACED"]
)
response = MockResponse(data=json.dumps(placed_str))
deserialized = self.deserialize(response,
(outer_enum.OuterEnum,), True)
self.assertTrue(isinstance(deserialized, outer_enum.OuterEnum))
self.assertTrue(deserialized.value == placed_str)
def test_deserialize_OuterNumber(self):
""" deserialize OuterNumber """
# make sure that an exception is thrown on an invalid type value
with self.assertRaises(ApiTypeError):
deserialized = self.deserialize(
MockResponse(data=json.dumps("test str")),
(outer_number.OuterNumber,),
True
)
# make sure that an exception is thrown on an invalid value
with self.assertRaises(ApiValueError):
deserialized = self.deserialize(
MockResponse(data=json.dumps(21.0)),
(outer_number.OuterNumber,),
True
)
# valid value works
number_val = 11.0
response = MockResponse(data=json.dumps(number_val))
number = self.deserialize(response,
(outer_number.OuterNumber,), True)
self.assertTrue(isinstance(number, outer_number.OuterNumber))
self.assertTrue(number.value == number_val)
def test_deserialize_file(self):
"""Ensures that file deserialization works"""
response_types_mixed = (file_type,)
# sample from http://www.jtricks.com/download-text
HTTPResponse = namedtuple(
'urllib3_response_HTTPResponse',
['status', 'reason', 'data', 'getheaders', 'getheader']
)
headers = {'Content-Disposition': 'attachment; filename=content.txt'}
def get_headers():
return headers
def get_header(name, default=None):
return headers.get(name, default)
file_data = (
"You are reading text file that was supposed to be downloaded\r\n"
"to your hard disk. If your browser offered to save you the file,"
"\r\nthen it handled the Content-Disposition header correctly."
)
http_response = HTTPResponse(
status=200,
reason='OK',
data=file_data,
getheaders=get_headers,
getheader=get_header
)
# response which is deserialized to a file
mock_response = RESTResponse(http_response)
file_path = None
try:
file_object = self.deserialize(
mock_response, response_types_mixed, True)
self.assertTrue(isinstance(file_object, file_type))
file_path = file_object.name
self.assertFalse(file_object.closed)
file_object.close()
if six.PY3:
file_data = file_data.encode('utf-8')
with open(file_path, 'rb') as other_file_object:
self.assertEqual(other_file_object.read(), file_data)
finally:
os.unlink(file_path)
def test_deserialize_binary_to_str(self):
"""Ensures that bytes deserialization works"""
response_types_mixed = (str,)
# sample from http://www.jtricks.com/download-text
HTTPResponse = namedtuple(
'urllib3_response_HTTPResponse',
['status', 'reason', 'data', 'getheaders', 'getheader']
)
headers = {}
def get_headers():
return headers
def get_header(name, default=None):
return headers.get(name, default)
data = "str"
http_response = HTTPResponse(
status=200,
reason='OK',
data=json.dumps(data).encode("utf-8") if six.PY3 else json.dumps(data),
getheaders=get_headers,
getheader=get_header
)
mock_response = RESTResponse(http_response)
result = self.deserialize(mock_response, response_types_mixed, True)
self.assertEqual(isinstance(result, str), True)
self.assertEqual(result, data)
def test_deserialize_string_boolean_map(self):
"""
Ensures that string boolean (additional properties)
deserialization works
"""
# make sure that an exception is thrown on an invalid type
with self.assertRaises(ApiTypeError):
deserialized = self.deserialize(
MockResponse(data=json.dumps("test str")),
(string_boolean_map.StringBooleanMap,),
True
)
# valid value works
item_val = {'some_key': True}
response = MockResponse(data=json.dumps(item_val))
model = string_boolean_map.StringBooleanMap(**item_val)
deserialized = self.deserialize(response,
(string_boolean_map.StringBooleanMap,), True)
self.assertTrue(isinstance(deserialized, string_boolean_map.StringBooleanMap))
self.assertTrue(deserialized['some_key'] == True)
self.assertTrue(deserialized == model)
| 33.695946 | 86 | 0.56039 |
from collections import namedtuple
import json
import os
import time
import unittest
import datetime
import six
import petstore_api
from petstore_api.exceptions import (
ApiTypeError,
ApiKeyError,
ApiValueError,
)
from petstore_api.model import (
enum_test,
pet,
animal,
dog,
parent_pet,
child_lizard,
category,
outer_enum,
outer_number,
string_boolean_map,
)
from petstore_api.model_utils import (
file_type,
int,
model_to_dict,
str,
)
from petstore_api.rest import RESTResponse
MockResponse = namedtuple('MockResponse', 'data')
class DeserializationTests(unittest.TestCase):
def setUp(self):
self.api_client = petstore_api.ApiClient()
self.deserialize = self.api_client.deserialize
def test_enum_test(self):
data = {
'enum_test': {
"enum_string": "UPPER",
"enum_string_required": "lower",
"enum_integer": 1,
"enum_number": 1.1,
"outerEnum": "placed"
}
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response,
({str: (enum_test.EnumTest,)},), True)
self.assertTrue(isinstance(deserialized, dict))
self.assertTrue(
isinstance(deserialized['enum_test'], enum_test.EnumTest))
value = (
outer_enum.OuterEnum.allowed_values[('value',)]["PLACED"])
outer_enum_val = outer_enum.OuterEnum(value)
sample_instance = enum_test.EnumTest(
enum_string="UPPER",
enum_string_required="lower",
enum_integer=1,
enum_number=1.1,
outer_enum=outer_enum_val
)
self.assertEqual(deserialized['enum_test'], sample_instance)
def test_deserialize_dict_str_pet(self):
data = {
'pet': {
"id": 0,
"category": {
"id": 0,
"name": "string"
},
"name": "doggie",
"photoUrls": [
"string"
],
"tags": [
{
"id": 0,
"fullName": "string"
}
],
"status": "available"
}
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response,
({str: (pet.Pet,)},), True)
self.assertTrue(isinstance(deserialized, dict))
self.assertTrue(isinstance(deserialized['pet'], pet.Pet))
def test_deserialize_dict_str_dog(self):
data = {
'dog': {
"className": "Dog",
"color": "white",
"breed": "Jack Russel Terrier"
}
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response,
({str: (animal.Animal,)},), True)
self.assertTrue(isinstance(deserialized, dict))
dog_inst = deserialized['dog']
self.assertTrue(isinstance(dog_inst, dog.Dog))
self.assertEqual(dog_inst.class_name, "Dog")
self.assertEqual(dog_inst.color, "white")
self.assertEqual(dog_inst.breed, "Jack Russel Terrier")
def test_deserialize_lizard(self):
data = {
"pet_type": "ChildLizard",
"lovesRocks": True
}
response = MockResponse(data=json.dumps(data))
lizard = self.deserialize(response,
(parent_pet.ParentPet,), True)
self.assertTrue(isinstance(lizard, child_lizard.ChildLizard))
self.assertEqual(lizard.pet_type, "ChildLizard")
self.assertEqual(lizard.loves_rocks, True)
def test_deserialize_dict_str_int(self):
data = {
'integer': 1
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, ({str: (int,)},), True)
self.assertTrue(isinstance(deserialized, dict))
self.assertTrue(isinstance(deserialized['integer'], int))
def test_deserialize_str(self):
data = "test str"
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, (str,), True)
self.assertTrue(isinstance(deserialized, str))
def test_deserialize_date(self):
data = "1997-07-16"
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, (datetime.date,), True)
self.assertTrue(isinstance(deserialized, datetime.date))
def test_deserialize_datetime(self):
data = "1997-07-16T19:20:30.45+01:00"
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, (datetime.datetime,), True)
self.assertTrue(isinstance(deserialized, datetime.datetime))
def test_deserialize_pet(self):
data = {
"id": 0,
"category": {
"id": 0,
"name": "string"
},
"name": "doggie",
"photoUrls": [
"string"
],
"tags": [
{
"id": 0,
"fullName": "string"
}
],
"status": "available"
}
response = MockResponse(data=json.dumps(data))
deserialized = self.deserialize(response, (pet.Pet,), True)
self.assertTrue(isinstance(deserialized, pet.Pet))
self.assertEqual(deserialized.id, 0)
self.assertEqual(deserialized.name, "doggie")
self.assertTrue(isinstance(deserialized.category, category.Category))
self.assertEqual(deserialized.category.name, "string")
self.assertTrue(isinstance(deserialized.tags, list))
self.assertEqual(deserialized.tags[0].full_name, "string")
    def test_deserialize_list_of_pet(self):
        """Deserialize a JSON array into ``list[Pet]``, preserving order."""
        data = [
            {
                "id": 0,
                "category": {
                    "id": 0,
                    "name": "string"
                },
                "name": "doggie0",
                "photoUrls": [
                    "string"
                ],
                "tags": [
                    {
                        "id": 0,
                        "fullName": "string"
                    }
                ],
                "status": "available"
            },
            {
                "id": 1,
                "category": {
                    "id": 0,
                    "name": "string"
                },
                "name": "doggie1",
                "photoUrls": [
                    "string"
                ],
                "tags": [
                    {
                        "id": 0,
                        "fullName": "string"
                    }
                ],
                "status": "available"
            }]
        response = MockResponse(data=json.dumps(data))
        # The list type is expressed as a single-element list of the item type.
        deserialized = self.deserialize(response,
                                        ([pet.Pet],), True)
        self.assertTrue(isinstance(deserialized, list))
        self.assertTrue(isinstance(deserialized[0], pet.Pet))
        self.assertEqual(deserialized[0].id, 0)
        self.assertEqual(deserialized[1].id, 1)
        self.assertEqual(deserialized[0].name, "doggie0")
        self.assertEqual(deserialized[1].name, "doggie1")
    def test_deserialize_nested_dict(self):
        """Deserialize a nested JSON object into ``dict[str, dict[str, int]]``."""
        data = {
            "foo": {
                "bar": 1
            }
        }
        response = MockResponse(data=json.dumps(data))
        deserialized = self.deserialize(response,
                                        ({str: ({str: (int,)},)},), True)
        self.assertTrue(isinstance(deserialized, dict))
        self.assertTrue(isinstance(deserialized["foo"], dict))
        self.assertTrue(isinstance(deserialized["foo"]["bar"], int))
    def test_deserialize_nested_list(self):
        """Deserialize a nested JSON array into ``list[list[str]]``."""
        data = [["foo"]]
        response = MockResponse(data=json.dumps(data))
        deserialized = self.deserialize(response, ([[str]],), True)
        self.assertTrue(isinstance(deserialized, list))
        self.assertTrue(isinstance(deserialized[0], list))
        self.assertTrue(isinstance(deserialized[0][0], str))
    def test_deserialize_none(self):
        """A JSON ``null`` must raise ApiTypeError when a datetime is required,
        with a message naming the offending variable and its path."""
        response = MockResponse(data=json.dumps(None))
        error_msg = (
            "Invalid type for variable 'received_data'. Required value type is "
            "datetime and passed type was NoneType at ['received_data']"
        )
        with self.assertRaises(ApiTypeError) as exc:
            deserialized = self.deserialize(response, (datetime.datetime,), True)
        # Exact message match pins the error-reporting format.
        self.assertEqual(str(exc.exception), error_msg)
    def test_deserialize_OuterEnum(self):
        """An out-of-enum string raises ApiValueError; an allowed value
        deserializes into an OuterEnum instance carrying that value."""
        with self.assertRaises(ApiValueError):
            self.deserialize(
                MockResponse(data=json.dumps("test str")),
                (outer_enum.OuterEnum,),
                True
            )
        # Pull a legal value from the model's own allowed_values table.
        placed_str = (
            outer_enum.OuterEnum.allowed_values[('value',)]["PLACED"]
        )
        response = MockResponse(data=json.dumps(placed_str))
        deserialized = self.deserialize(response,
                                        (outer_enum.OuterEnum,), True)
        self.assertTrue(isinstance(deserialized, outer_enum.OuterEnum))
        self.assertTrue(deserialized.value == placed_str)
    def test_deserialize_OuterNumber(self):
        """Wrong JSON type raises ApiTypeError; a numerically out-of-range
        value raises ApiValueError; a valid number deserializes cleanly."""
        with self.assertRaises(ApiTypeError):
            deserialized = self.deserialize(
                MockResponse(data=json.dumps("test str")),
                (outer_number.OuterNumber,),
                True
            )
        # 21.0 is rejected with ApiValueError — presumably outside the model's
        # validated range; confirm against the OuterNumber model definition.
        with self.assertRaises(ApiValueError):
            deserialized = self.deserialize(
                MockResponse(data=json.dumps(21.0)),
                (outer_number.OuterNumber,),
                True
            )
        number_val = 11.0
        response = MockResponse(data=json.dumps(number_val))
        number = self.deserialize(response,
                                  (outer_number.OuterNumber,), True)
        self.assertTrue(isinstance(number, outer_number.OuterNumber))
        self.assertTrue(number.value == number_val)
def test_deserialize_file(self):
response_types_mixed = (file_type,)
HTTPResponse = namedtuple(
'urllib3_response_HTTPResponse',
['status', 'reason', 'data', 'getheaders', 'getheader']
)
headers = {'Content-Disposition': 'attachment; filename=content.txt'}
def get_headers():
return headers
def get_header(name, default=None):
return headers.get(name, default)
file_data = (
"You are reading text file that was supposed to be downloaded\r\n"
"to your hard disk. If your browser offered to save you the file,"
"\r\nthen it handled the Content-Disposition header correctly."
)
http_response = HTTPResponse(
status=200,
reason='OK',
data=file_data,
getheaders=get_headers,
getheader=get_header
)
mock_response = RESTResponse(http_response)
file_path = None
try:
file_object = self.deserialize(
mock_response, response_types_mixed, True)
self.assertTrue(isinstance(file_object, file_type))
file_path = file_object.name
self.assertFalse(file_object.closed)
file_object.close()
if six.PY3:
file_data = file_data.encode('utf-8')
with open(file_path, 'rb') as other_file_object:
self.assertEqual(other_file_object.read(), file_data)
finally:
os.unlink(file_path)
    def test_deserialize_binary_to_str(self):
        """A binary (bytes) response body is decoded back into ``str``
        when the target type is str and no Content-Disposition is set."""
        response_types_mixed = (str,)
        HTTPResponse = namedtuple(
            'urllib3_response_HTTPResponse',
            ['status', 'reason', 'data', 'getheaders', 'getheader']
        )
        headers = {}
        def get_headers():
            return headers
        def get_header(name, default=None):
            return headers.get(name, default)
        data = "str"
        http_response = HTTPResponse(
            status=200,
            reason='OK',
            # On Python 3 the wire payload is bytes; on Python 2 it is str.
            data=json.dumps(data).encode("utf-8") if six.PY3 else json.dumps(data),
            getheaders=get_headers,
            getheader=get_header
        )
        mock_response = RESTResponse(http_response)
        result = self.deserialize(mock_response, response_types_mixed, True)
        self.assertEqual(isinstance(result, str), True)
        self.assertEqual(result, data)
    def test_deserialize_string_boolean_map(self):
        """A non-object payload raises ApiTypeError; a {str: bool} object
        deserializes into a StringBooleanMap equal to a directly built one."""
        with self.assertRaises(ApiTypeError):
            deserialized = self.deserialize(
                MockResponse(data=json.dumps("test str")),
                (string_boolean_map.StringBooleanMap,),
                True
            )
        item_val = {'some_key': True}
        response = MockResponse(data=json.dumps(item_val))
        # Reference instance constructed directly from the same mapping.
        model = string_boolean_map.StringBooleanMap(**item_val)
        deserialized = self.deserialize(response,
                                        (string_boolean_map.StringBooleanMap,), True)
        self.assertTrue(isinstance(deserialized, string_boolean_map.StringBooleanMap))
        self.assertTrue(deserialized['some_key'] == True)
        self.assertTrue(deserialized == model)
| true | true |
f73af2d1928b4c52de88a1592e0eecc993f5fbed | 4,983 | py | Python | 3d-tracking/run_estimation.py | sadjadasghari/3d-vehicle-tracking | af05d52be81db32fc6a21bf60a757ebc46557998 | [
"BSD-3-Clause"
] | 603 | 2019-05-28T01:53:29.000Z | 2022-03-22T00:26:07.000Z | 3d-tracking/run_estimation.py | reinforcementdriving/3d-vehicle-tracking | f8433f72a51dd1a7190570e63e9fda4a924a81f0 | [
"BSD-3-Clause"
] | 45 | 2019-05-29T05:07:20.000Z | 2022-01-28T04:10:42.000Z | 3d-tracking/run_estimation.py | reinforcementdriving/3d-vehicle-tracking | f8433f72a51dd1a7190570e63e9fda4a924a81f0 | [
"BSD-3-Clause"
] | 149 | 2019-05-28T06:53:12.000Z | 2022-03-24T19:11:58.000Z | import os
import sys
import argparse
import pickle
import subprocess
from time import sleep
'''
Multiple GPUs and processes script for monocular 3D Tracking
'''
def parse_args():
    """Parse the launcher's command-line options.

    Returns the argparse namespace, augmented with two derived positive
    flags (``gen_output`` / ``merge_result``) computed from their negated
    ``--not_*`` counterparts.
    """
    p = argparse.ArgumentParser(
        description='Monocular 3D Estimation',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Positional arguments: which dataset and which split to run on.
    p.add_argument('set', choices=['gta', 'kitti'])
    p.add_argument('split', choices=['train', 'val', 'test'],
                   help='Which data split to use in testing')
    # Experiment bookkeeping.
    p.add_argument('--session', default='616',
                   help='Name of the session, to separate exp')
    p.add_argument('--epoch', default='030',
                   help='How many epochs you used to separate exp')
    p.add_argument('--flag', default='-j 4 -b 1 --n_box_limit 300',
                   help='Flags for running evaluation code')
    # Scheduling across GPUs/processes.
    p.add_argument('--gpu', type=str, default='0,1,2,3,4',
                   help='Which GPU to use in testing.')
    p.add_argument('--n_tasks', type=int, default=1,
                   help='number of tasks running per GPU. n=1 is enough.')
    # Behavioral switches.
    p.add_argument('--dry_run', action='store_true', default=False,
                   help='Show command without running')
    p.add_argument('--overwrite', action='store_true', default=False,
                   help='Overwrite the output files')
    p.add_argument('--not_gen_output', action='store_true', default=False,
                   help='Run 3D estimation and store tracking info')
    p.add_argument('--not_merge_result', action='store_true', default=False,
                   help='Merge 3D result for tracking')
    args = p.parse_args()
    # Positive flags read better at the call sites than double negation.
    args.gen_output = not args.not_gen_output
    args.merge_result = not args.not_merge_result
    return args
# NOTE: everything below runs at import time — it parses argv and may
# create the output directory as a side effect.
print(' '.join(sys.argv))
args = parse_args()
GPUS = args.gpu.split(',')
# Metadata
# Build the per-dataset command template; placeholders ({SPLIT}, {CKPT},
# {JSON}, {TRK}, {SESS}, {FLAG}, {EPOCH}) are filled per video later.
if args.set == 'gta':
    JSON_ROOT = './data/gta5_tracking/{}/label/'.format(args.split)
    CMD = 'python mono_3d_estimation.py gta test \
    --data_split {SPLIT} \
    --resume ./checkpoint/{CKPT} \
    --json_path {JSON} \
    --track_name {TRK} \
    --session {SESS} {FLAG} --start_epoch {EPOCH}'
else:
    JSON_ROOT = './data/kitti_tracking/{}ing/label_02/'.format(args.split)
    CMD = 'python mono_3d_estimation.py kitti test \
    --data_split {SPLIT} \
    --resume ./checkpoint/{CKPT} \
    --json_path {JSON} \
    --track_name {TRK} \
    --is_tracking \
    --is_normalizing \
    --session {SESS} {FLAG} --start_epoch {EPOCH}'
# Output directory for this (session, epoch, set, split) combination.
SAVE_PATH = 'output/{SESS}_{EP}_{SET}_{SPLIT}_set/'.format(
    **{'SESS': args.session, 'EP': args.epoch, 'SET': args.set, 'SPLIT': args.split})
if not os.path.isdir(SAVE_PATH):
    print("Making {}...".format(SAVE_PATH))
    os.mkdir(SAVE_PATH)
CKPT = '{}_{}_checkpoint_{}.pth.tar'.format(args.session, args.set, args.epoch)
# Final merged pickle produced by merge_3d_results().
SAVE_NAME = '{PATH}{SESS}_{EP}_{SET}_roipool_output.pkl'.format(
    **{'PATH': SAVE_PATH, 'SESS': args.session, 'EP': args.epoch, 'SET': args.set})
# One label JSON per video sequence; sorted for deterministic ordering.
JSON_PATHS = sorted(
    [n for n in os.listdir(JSON_ROOT) if n.endswith('bdd.json')])
# Script
def gen_3d_output():
    """Launch one estimation subprocess per label JSON, batched over GPUs.

    JSON_PATHS is processed in batches of ``len(GPUS) * n_tasks``; each
    subprocess is pinned to a GPU via CUDA_VISIBLE_DEVICES. Honors
    ``args.dry_run`` (print only) and ``args.overwrite`` (skip existing
    outputs). Waits for each batch before starting the next.
    """
    m = len(GPUS) * args.n_tasks
    ps = []
    for i in range(len(JSON_PATHS) // m + 1):
        # Pair each JSON of the batch with a GPU id (GPUs cycle n_tasks times).
        for JSON, GPU in zip(JSON_PATHS[m * i:m * i + m], GPUS * args.n_tasks):
            TRK = '{}{}_{}_{}_roipool_output.pkl'.format(
                SAVE_PATH.replace('output/', ''), args.session, args.epoch,
                JSON.replace('.json', ''))
            cmd = CMD.format(
                **{'CKPT': CKPT, 'JSON': os.path.join(JSON_ROOT, JSON),
                   'TRK': TRK, 'SESS': args.session, 'EPOCH': args.epoch,
                   'FLAG': args.flag, 'SPLIT': args.split})
            print(i, GPU, cmd)
            if not args.dry_run:
                # Skip work whose output pickle already exists.
                if not args.overwrite and os.path.isfile(os.path.join('output', TRK)):
                    print("SKIP running. Generated file {} Found".format(TRK))
                    continue
                subprocess_env = os.environ.copy()
                subprocess_env['CUDA_VISIBLE_DEVICES'] = GPU
                p = subprocess.Popen(cmd, shell=True, env=subprocess_env)
                ps.append(p)
        sleep(1)
        # Synchronize: wait for the whole batch before launching the next one.
        if not args.dry_run:
            for p in ps:
                p.wait()
def merge_3d_results():
    """Concatenate the per-video pickles produced by gen_3d_output() into
    one list and dump it to SAVE_NAME.

    Honors ``args.dry_run`` by only printing file names. Skips the final
    dump when nothing was collected.
    """
    all_pkl = []
    for JSON in JSON_PATHS:
        TRK = '{}{}_{}_{}_roipool_output.pkl'.format(SAVE_PATH, args.session, args.epoch,
                                                     JSON.replace('.json', ''))
        print("Reading {}...".format(TRK))
        if not args.dry_run:
            # Context manager closes the handle promptly; the original
            # `pickle.load(open(TRK, 'rb'))` leaked one fd per file.
            with open(TRK, 'rb') as f:
                all_pkl += pickle.load(f)
    if not args.dry_run and len(all_pkl) > 0:
        print("Save to {}".format(SAVE_NAME))
        with open(SAVE_NAME, 'wb') as f:
            pickle.dump(all_pkl, f)
if __name__ == '__main__':
    # Run the two phases independently; each can be disabled via
    # --not_gen_output / --not_merge_result.
    if args.gen_output:
        gen_3d_output()
    if args.merge_result:
        merge_3d_results()
| 38.627907 | 89 | 0.593819 | import os
import sys
import argparse
import pickle
import subprocess
from time import sleep
def parse_args():
parser = argparse.ArgumentParser(description='Monocular 3D Estimation',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('set', choices=['gta', 'kitti'])
parser.add_argument('split', choices=['train', 'val', 'test'],
help='Which data split to use in testing')
parser.add_argument('--session', default='616',
help='Name of the session, to separate exp')
parser.add_argument('--epoch', default='030',
help='How many epochs you used to separate exp')
parser.add_argument('--flag', default='-j 4 -b 1 --n_box_limit 300',
help='Flags for running evaluation code')
parser.add_argument('--gpu', type=str, default='0,1,2,3,4',
help='Which GPU to use in testing.')
parser.add_argument('--n_tasks', type=int, default=1,
help='number of tasks running per GPU. n=1 is enough.')
parser.add_argument('--dry_run', action='store_true', default=False,
help='Show command without running')
parser.add_argument('--overwrite', action='store_true', default=False,
help='Overwrite the output files')
parser.add_argument('--not_gen_output', action='store_true', default=False,
help='Run 3D estimation and store tracking info')
parser.add_argument('--not_merge_result', action='store_true', default=False,
help='Merge 3D result for tracking')
args = parser.parse_args()
args.gen_output = not args.not_gen_output
args.merge_result = not args.not_merge_result
return args
print(' '.join(sys.argv))
args = parse_args()
GPUS = args.gpu.split(',')
if args.set == 'gta':
JSON_ROOT = './data/gta5_tracking/{}/label/'.format(args.split)
CMD = 'python mono_3d_estimation.py gta test \
--data_split {SPLIT} \
--resume ./checkpoint/{CKPT} \
--json_path {JSON} \
--track_name {TRK} \
--session {SESS} {FLAG} --start_epoch {EPOCH}'
else:
JSON_ROOT = './data/kitti_tracking/{}ing/label_02/'.format(args.split)
CMD = 'python mono_3d_estimation.py kitti test \
--data_split {SPLIT} \
--resume ./checkpoint/{CKPT} \
--json_path {JSON} \
--track_name {TRK} \
--is_tracking \
--is_normalizing \
--session {SESS} {FLAG} --start_epoch {EPOCH}'
SAVE_PATH = 'output/{SESS}_{EP}_{SET}_{SPLIT}_set/'.format(
**{'SESS': args.session, 'EP': args.epoch, 'SET': args.set, 'SPLIT': args.split})
if not os.path.isdir(SAVE_PATH):
print("Making {}...".format(SAVE_PATH))
os.mkdir(SAVE_PATH)
CKPT = '{}_{}_checkpoint_{}.pth.tar'.format(args.session, args.set, args.epoch)
SAVE_NAME = '{PATH}{SESS}_{EP}_{SET}_roipool_output.pkl'.format(
**{'PATH': SAVE_PATH, 'SESS': args.session, 'EP': args.epoch, 'SET': args.set})
JSON_PATHS = sorted(
[n for n in os.listdir(JSON_ROOT) if n.endswith('bdd.json')])
def gen_3d_output():
m = len(GPUS) * args.n_tasks
ps = []
for i in range(len(JSON_PATHS) // m + 1):
for JSON, GPU in zip(JSON_PATHS[m * i:m * i + m], GPUS * args.n_tasks):
TRK = '{}{}_{}_{}_roipool_output.pkl'.format(
SAVE_PATH.replace('output/', ''), args.session, args.epoch,
JSON.replace('.json', ''))
cmd = CMD.format(
**{'CKPT': CKPT, 'JSON': os.path.join(JSON_ROOT, JSON),
'TRK': TRK, 'SESS': args.session, 'EPOCH': args.epoch,
'FLAG': args.flag, 'SPLIT': args.split})
print(i, GPU, cmd)
if not args.dry_run:
if not args.overwrite and os.path.isfile(os.path.join('output', TRK)):
print("SKIP running. Generated file {} Found".format(TRK))
continue
subprocess_env = os.environ.copy()
subprocess_env['CUDA_VISIBLE_DEVICES'] = GPU
p = subprocess.Popen(cmd, shell=True, env=subprocess_env)
ps.append(p)
sleep(1)
if not args.dry_run:
for p in ps:
p.wait()
def merge_3d_results():
all_pkl = []
for JSON in JSON_PATHS:
TRK = '{}{}_{}_{}_roipool_output.pkl'.format(SAVE_PATH, args.session, args.epoch,
JSON.replace('.json', ''))
print("Reading {}...".format(TRK))
if not args.dry_run: all_pkl += pickle.load(open(TRK, 'rb'))
if not args.dry_run and len(all_pkl) > 0:
print("Save to {}".format(SAVE_NAME))
with open(SAVE_NAME, 'wb') as f:
pickle.dump(all_pkl, f)
if __name__ == '__main__':
if args.gen_output:
gen_3d_output()
if args.merge_result:
merge_3d_results()
| true | true |
f73af3764f5f26559a04b84d15a76e6315064ffd | 3,349 | py | Python | datacube/scripts/system.py | eodcgmbh/datacube-core | 0792e519ccfd33e0a0acf368aa6f33ca2c1ea50f | [
"Apache-2.0"
] | 363 | 2017-03-17T07:33:18.000Z | 2022-03-25T12:33:36.000Z | datacube/scripts/system.py | eodcgmbh/datacube-core | 0792e519ccfd33e0a0acf368aa6f33ca2c1ea50f | [
"Apache-2.0"
] | 1,035 | 2017-03-07T05:38:43.000Z | 2022-03-29T03:19:10.000Z | datacube/scripts/system.py | eodcgmbh/datacube-core | 0792e519ccfd33e0a0acf368aa6f33ca2c1ea50f | [
"Apache-2.0"
] | 146 | 2017-03-08T01:08:37.000Z | 2022-03-16T01:52:13.000Z | # This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2020 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
import logging
import click
from click import echo, style
from sqlalchemy.exc import OperationalError
import datacube
from datacube.index import index_connect
from datacube.drivers.postgres._connections import IndexSetupError
from datacube.ui import click as ui
from datacube.ui.click import cli, handle_exception
from datacube.config import LocalConfig
_LOG = logging.getLogger('datacube-system')
@cli.group(name='system', help='System commands')
def system():
    """Root of the ``datacube system`` command group; subcommands attach to it."""
    pass
@system.command('init', help='Initialise the database')
@click.option(
    '--default-types/--no-default-types', is_flag=True, default=True,
    help="Add default types? (default: true)"
)
@click.option(
    '--init-users/--no-init-users', is_flag=True, default=True,
    help="Include user roles and grants. (default: true)"
)
@click.option(
    '--recreate-views/--no-recreate-views', is_flag=True, default=True,
    help="Recreate dynamic views"
)
@click.option(
    '--rebuild/--no-rebuild', is_flag=True, default=False,
    help="Rebuild all dynamic fields (caution: slow)"
)
@click.option(
    '--lock-table/--no-lock-table', is_flag=True, default=False,
    help="Allow table to be locked (eg. while creating missing indexes)"
)
@ui.pass_index(expect_initialised=False)
def database_init(index, default_types, init_users, recreate_views, rebuild, lock_table):
    """Create or update the index database schema, then verify its
    dynamic field indexes and views.

    ``index`` is injected by the ``pass_index`` decorator; the boolean
    parameters come from the click options above.
    """
    echo('Initialising database...')
    # init_db() reports whether the schema was freshly created or already present.
    was_created = index.init_db(with_default_types=default_types,
                                with_permissions=init_users)
    if was_created:
        echo(style('Created.', bold=True))
    else:
        echo(style('Updated.', bold=True))
    echo('Checking indexes/views.')
    index.metadata_types.check_field_indexes(
        allow_table_lock=lock_table,
        # --rebuild implies rebuilding the views as well.
        rebuild_indexes=rebuild,
        rebuild_views=recreate_views or rebuild,
    )
    echo('Done.')
@system.command('check', help='Check and display current configuration')
@ui.pass_config
def check(local_config: LocalConfig):
    """
    Verify & view current configuration.

    Prints the resolved configuration (version, config files, DB host,
    database, user, environment, index driver), then attempts a live
    connection and reports the current user's privileges. Connection and
    initialisation failures are routed through handle_exception.
    """
    def echo_field(name, value):
        # Left-align the label in a 15-char column, value in bold.
        echo('{:<15}'.format(name + ':') + style(str(value), bold=True))
    echo_field('Version', datacube.__version__)
    echo_field('Config files', ','.join(local_config.files_loaded))
    echo_field('Host',
               '{}:{}'.format(local_config['db_hostname'] or 'localhost', local_config.get('db_port', None) or '5432'))
    echo_field('Database', local_config['db_database'])
    echo_field('User', local_config['db_username'])
    echo_field('Environment', local_config['env'])
    echo_field('Index Driver', local_config['index_driver'])
    echo()
    echo('Valid connection:\t', nl=False)
    try:
        index = index_connect(local_config=local_config)
        echo(style('YES', bold=True))
        # Report only the roles granted to the configured user.
        for role, user, description in index.users.list_users():
            if user == local_config['db_username']:
                echo('You have %s privileges.' % style(role.upper(), bold=True))
    except OperationalError as e:
        handle_exception('Error Connecting to Database: %s', e)
    except IndexSetupError as e:
        handle_exception('Database not initialised: %s', e)
| 33.49 | 119 | 0.693938 |
import logging
import click
from click import echo, style
from sqlalchemy.exc import OperationalError
import datacube
from datacube.index import index_connect
from datacube.drivers.postgres._connections import IndexSetupError
from datacube.ui import click as ui
from datacube.ui.click import cli, handle_exception
from datacube.config import LocalConfig
_LOG = logging.getLogger('datacube-system')
@cli.group(name='system', help='System commands')
def system():
pass
@system.command('init', help='Initialise the database')
@click.option(
'--default-types/--no-default-types', is_flag=True, default=True,
help="Add default types? (default: true)"
)
@click.option(
'--init-users/--no-init-users', is_flag=True, default=True,
help="Include user roles and grants. (default: true)"
)
@click.option(
'--recreate-views/--no-recreate-views', is_flag=True, default=True,
help="Recreate dynamic views"
)
@click.option(
'--rebuild/--no-rebuild', is_flag=True, default=False,
help="Rebuild all dynamic fields (caution: slow)"
)
@click.option(
'--lock-table/--no-lock-table', is_flag=True, default=False,
help="Allow table to be locked (eg. while creating missing indexes)"
)
@ui.pass_index(expect_initialised=False)
def database_init(index, default_types, init_users, recreate_views, rebuild, lock_table):
echo('Initialising database...')
was_created = index.init_db(with_default_types=default_types,
with_permissions=init_users)
if was_created:
echo(style('Created.', bold=True))
else:
echo(style('Updated.', bold=True))
echo('Checking indexes/views.')
index.metadata_types.check_field_indexes(
allow_table_lock=lock_table,
rebuild_indexes=rebuild,
rebuild_views=recreate_views or rebuild,
)
echo('Done.')
@system.command('check', help='Check and display current configuration')
@ui.pass_config
def check(local_config: LocalConfig):
def echo_field(name, value):
echo('{:<15}'.format(name + ':') + style(str(value), bold=True))
echo_field('Version', datacube.__version__)
echo_field('Config files', ','.join(local_config.files_loaded))
echo_field('Host',
'{}:{}'.format(local_config['db_hostname'] or 'localhost', local_config.get('db_port', None) or '5432'))
echo_field('Database', local_config['db_database'])
echo_field('User', local_config['db_username'])
echo_field('Environment', local_config['env'])
echo_field('Index Driver', local_config['index_driver'])
echo()
echo('Valid connection:\t', nl=False)
try:
index = index_connect(local_config=local_config)
echo(style('YES', bold=True))
for role, user, description in index.users.list_users():
if user == local_config['db_username']:
echo('You have %s privileges.' % style(role.upper(), bold=True))
except OperationalError as e:
handle_exception('Error Connecting to Database: %s', e)
except IndexSetupError as e:
handle_exception('Database not initialised: %s', e)
| true | true |
f73af4f2232cb7210ca602af5113efe88a0f4ac9 | 415 | py | Python | env/Lib/site-packages/plotly/validators/mesh3d/colorbar/_ticktext.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | env/Lib/site-packages/plotly/validators/mesh3d/colorbar/_ticktext.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | env/Lib/site-packages/plotly/validators/mesh3d/colorbar/_ticktext.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class TicktextValidator(_plotly_utils.basevalidators.DataArrayValidator):
    # Validator for the `mesh3d.colorbar.ticktext` data-array property.
    def __init__(self, plotly_name="ticktext", parent_name="mesh3d.colorbar", **kwargs):
        """Forward to DataArrayValidator; edits default to the 'colorbars'
        edit type unless the caller overrides ``edit_type``."""
        super(TicktextValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            **kwargs
        )
| 34.583333 | 88 | 0.686747 | import _plotly_utils.basevalidators
class TicktextValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="ticktext", parent_name="mesh3d.colorbar", **kwargs):
super(TicktextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs
)
| true | true |
f73af53b54e191afef42406c6da1960d651d65ce | 1,666 | py | Python | examples/widgets/effectwidget3_advanced.py | yunus-ceyhan/kivy | ba646bd82c8eb5c505c68d18de52f8f3e6cf199a | [
"MIT"
] | 13,889 | 2015-01-01T06:43:41.000Z | 2022-03-31T17:37:56.000Z | examples/widgets/effectwidget3_advanced.py | yunus-ceyhan/kivy | ba646bd82c8eb5c505c68d18de52f8f3e6cf199a | [
"MIT"
] | 4,570 | 2015-01-01T17:58:52.000Z | 2022-03-31T18:42:16.000Z | examples/widgets/effectwidget3_advanced.py | yunus-ceyhan/kivy | ba646bd82c8eb5c505c68d18de52f8f3e6cf199a | [
"MIT"
] | 3,786 | 2015-01-01T09:20:45.000Z | 2022-03-30T21:15:05.000Z | '''
This example demonstrates creating and using an AdvancedEffectBase. In
this case, we use it to efficiently pass the touch coordinates into the shader.
'''
from kivy.base import runTouchApp
from kivy.properties import ListProperty
from kivy.lang import Builder
from kivy.uix.effectwidget import EffectWidget, AdvancedEffectBase
effect_string = '''
uniform vec2 touch;
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
vec2 distance = 0.025*(coords - touch);
float dist_mag = (distance.x*distance.x + distance.y*distance.y);
vec3 multiplier = vec3(abs(sin(dist_mag - time)));
return vec4(multiplier * color.xyz, 1.0);
}
'''
class TouchEffect(AdvancedEffectBase):
    """Shader effect that exposes the last touch position as a GLSL uniform."""
    # Touch coordinates forwarded to the shader; updated by TouchWidget.
    touch = ListProperty([0.0, 0.0])
    def __init__(self, *args, **kwargs):
        super(TouchEffect, self).__init__(*args, **kwargs)
        self.glsl = effect_string
        self.uniforms = {'touch': [0.0, 0.0]}
    def on_touch(self, *args, **kwargs):
        # Property observer: push the new coordinates into the uniform dict.
        self.uniforms['touch'] = [float(i) for i in self.touch]
class TouchWidget(EffectWidget):
    """EffectWidget that feeds touch positions into a single TouchEffect."""
    def __init__(self, *args, **kwargs):
        super(TouchWidget, self).__init__(*args, **kwargs)
        self.effect = TouchEffect()
        self.effects = [self.effect]
    def on_touch_down(self, touch):
        # NOTE(review): the super() return value (touch-consumed flag) is
        # discarded here, so this handler reports the touch as unhandled.
        super(TouchWidget, self).on_touch_down(touch)
        self.on_touch_move(touch)
    def on_touch_move(self, touch):
        # Forward the position; TouchEffect.on_touch updates the uniform.
        self.effect.touch = touch.pos
root = Builder.load_string('''
TouchWidget:
Button:
text: 'Some text!'
Image:
source: 'data/logo/kivy-icon-512.png'
allow_stretch: True
keep_ratio: False
''')
runTouchApp(root)
| 26.03125 | 79 | 0.67467 |
from kivy.base import runTouchApp
from kivy.properties import ListProperty
from kivy.lang import Builder
from kivy.uix.effectwidget import EffectWidget, AdvancedEffectBase
effect_string = '''
uniform vec2 touch;
vec4 effect(vec4 color, sampler2D texture, vec2 tex_coords, vec2 coords)
{
vec2 distance = 0.025*(coords - touch);
float dist_mag = (distance.x*distance.x + distance.y*distance.y);
vec3 multiplier = vec3(abs(sin(dist_mag - time)));
return vec4(multiplier * color.xyz, 1.0);
}
'''
class TouchEffect(AdvancedEffectBase):
touch = ListProperty([0.0, 0.0])
def __init__(self, *args, **kwargs):
super(TouchEffect, self).__init__(*args, **kwargs)
self.glsl = effect_string
self.uniforms = {'touch': [0.0, 0.0]}
def on_touch(self, *args, **kwargs):
self.uniforms['touch'] = [float(i) for i in self.touch]
class TouchWidget(EffectWidget):
def __init__(self, *args, **kwargs):
super(TouchWidget, self).__init__(*args, **kwargs)
self.effect = TouchEffect()
self.effects = [self.effect]
def on_touch_down(self, touch):
super(TouchWidget, self).on_touch_down(touch)
self.on_touch_move(touch)
def on_touch_move(self, touch):
self.effect.touch = touch.pos
root = Builder.load_string('''
TouchWidget:
Button:
text: 'Some text!'
Image:
source: 'data/logo/kivy-icon-512.png'
allow_stretch: True
keep_ratio: False
''')
runTouchApp(root)
| true | true |
f73af54edc6c01a915bdfc48de4bc1fc9a35a276 | 2,661 | py | Python | setup.py | RobertRosca/EXtra-data | b226dc4601807551418a5422e7590540e2d736ee | [
"BSD-3-Clause"
] | null | null | null | setup.py | RobertRosca/EXtra-data | b226dc4601807551418a5422e7590540e2d736ee | [
"BSD-3-Clause"
] | 49 | 2021-01-08T10:53:29.000Z | 2022-03-30T22:07:28.000Z | setup.py | RobertRosca/EXtra-data | b226dc4601807551418a5422e7590540e2d736ee | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os.path as osp
import re
from setuptools import setup, find_packages
import sys
def get_script_path():
    """Return the absolute directory containing the script being run."""
    script = sys.argv[0]
    return osp.dirname(osp.realpath(script))
def read(*parts):
    """Read a file located relative to this script's directory.

    The original left the handle open for the GC to collect; using a
    context manager closes it deterministically. Uses the platform's
    default text encoding, as before.
    """
    with open(osp.join(get_script_path(), *parts)) as f:
        return f.read()
def find_version(*parts):
    """Extract the ``__version__ = "X.Y.Z"`` string from a source file.

    Raises RuntimeError when no version assignment is found.
    """
    source = read(*parts)
    match = re.search(r'^__version__ = "(\d+\.\d+\.\d+)"', source, re.M)
    if match is None:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# Package metadata and build configuration for the EXtra-data distribution.
setup(name="EXtra-data",
      version=find_version("extra_data", "__init__.py"),
      author="European XFEL GmbH",
      author_email="da-support@xfel.eu",
      maintainer="Thomas Michelat",
      url="https://github.com/European-XFEL/EXtra-data",
      description="Tools to read and analyse data from European XFEL ",
      long_description=read("README.md"),
      long_description_content_type='text/markdown',
      license="BSD-3-Clause",
      packages=find_packages(),
      # Test fixture files shipped inside the package.
      package_data={
          'extra_data.tests': ['dssc_geo_june19.h5', 'lpd_mar_18.h5'],
      },
      # Command-line entry points installed with the package.
      entry_points={
          "console_scripts": [
              "lsxfel = extra_data.lsxfel:main",
              "karabo-bridge-serve-files = extra_data.export:main",
              "extra-data-validate = extra_data.validation:main",
              "extra-data-make-virtual-cxi = extra_data.cli.make_virtual_cxi:main",
              "extra-data-locality = extra_data.locality:main",
          ],
      },
      install_requires=[
          'fabio',
          'h5py>=2.7.1',
          'karabo-bridge >=0.6',
          'matplotlib',
          'numpy',
          'pandas',
          'psutil',
          'scipy',
          'xarray',
      ],
      # Optional dependency sets: `pip install EXtra-data[docs]` / `[test]`.
      extras_require={
          'docs': [
              'sphinx',
              'nbsphinx',
              'ipython',  # For nbsphinx syntax highlighting
              'sphinxcontrib_github_alt',
          ],
          'test': [
              'coverage',
              'dask[array]',
              'nbval',
              'pytest',
              'pytest-cov',
              'testpath',
          ]
      },
      python_requires='>=3.6',
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: BSD License',
          'Operating System :: POSIX :: Linux',
          'Programming Language :: Python :: 3',
          'Topic :: Scientific/Engineering :: Information Analysis',
          'Topic :: Scientific/Engineering :: Physics',
      ]
      )
| 30.586207 | 83 | 0.554303 |
import os.path as osp
import re
from setuptools import setup, find_packages
import sys
def get_script_path():
return osp.dirname(osp.realpath(sys.argv[0]))
def read(*parts):
return open(osp.join(get_script_path(), *parts)).read()
def find_version(*parts):
vers_file = read(*parts)
match = re.search(r'^__version__ = "(\d+\.\d+\.\d+)"', vers_file, re.M)
if match is not None:
return match.group(1)
raise RuntimeError("Unable to find version string.")
setup(name="EXtra-data",
version=find_version("extra_data", "__init__.py"),
author="European XFEL GmbH",
author_email="da-support@xfel.eu",
maintainer="Thomas Michelat",
url="https://github.com/European-XFEL/EXtra-data",
description="Tools to read and analyse data from European XFEL ",
long_description=read("README.md"),
long_description_content_type='text/markdown',
license="BSD-3-Clause",
packages=find_packages(),
package_data={
'extra_data.tests': ['dssc_geo_june19.h5', 'lpd_mar_18.h5'],
},
entry_points={
"console_scripts": [
"lsxfel = extra_data.lsxfel:main",
"karabo-bridge-serve-files = extra_data.export:main",
"extra-data-validate = extra_data.validation:main",
"extra-data-make-virtual-cxi = extra_data.cli.make_virtual_cxi:main",
"extra-data-locality = extra_data.locality:main",
],
},
install_requires=[
'fabio',
'h5py>=2.7.1',
'karabo-bridge >=0.6',
'matplotlib',
'numpy',
'pandas',
'psutil',
'scipy',
'xarray',
],
extras_require={
'docs': [
'sphinx',
'nbsphinx',
'ipython',
'sphinxcontrib_github_alt',
],
'test': [
'coverage',
'dask[array]',
'nbval',
'pytest',
'pytest-cov',
'testpath',
]
},
python_requires='>=3.6',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Scientific/Engineering :: Physics',
]
)
| true | true |
f73af56216a461d62f19449f3eae81d65ad088a6 | 123 | py | Python | Codeforces/A_Difference_Row.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | Codeforces/A_Difference_Row.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | Codeforces/A_Difference_Row.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | n = int(input())
# Read the sequence of integers from stdin (count `n` was read above).
l = list(map(int, input().split()))
ans = []
l = sorted(l)
# Arrange as: maximum first, the middle values in ascending order, minimum
# last — presumably to maximise the row's difference sum per the problem
# statement (Codeforces "Difference Row"); verify against the task.
ans = [l[-1]] + l[1:(n-1)] + [l[0]]
print(*ans) | 20.5 | 35 | 0.487805 | n = int(input())
l = list(map(int, input().split()))
ans = []
l = sorted(l)
ans = [l[-1]] + l[1:(n-1)] + [l[0]]
print(*ans) | true | true |
f73af6433f52825790175b44ec70bfe78a54e8ba | 45,942 | py | Python | localstack/services/awslambda/lambda_api.py | pkjmesra/localstack | 586250f1f26a85fb23793707fec38fcd9b38e3ca | [
"Apache-2.0"
] | 1 | 2019-10-29T07:12:03.000Z | 2019-10-29T07:12:03.000Z | localstack/services/awslambda/lambda_api.py | pkjmesra/localstack | 586250f1f26a85fb23793707fec38fcd9b38e3ca | [
"Apache-2.0"
] | null | null | null | localstack/services/awslambda/lambda_api.py | pkjmesra/localstack | 586250f1f26a85fb23793707fec38fcd9b38e3ca | [
"Apache-2.0"
] | null | null | null | import re
import os
import imp
import sys
import json
import uuid
import time
import base64
import logging
import zipfile
import threading
import traceback
import hashlib
from io import BytesIO
from datetime import datetime
from six.moves import cStringIO as StringIO
from six.moves.urllib.parse import urlparse
from flask import Flask, Response, jsonify, request
from localstack import config
from localstack.constants import TEST_AWS_ACCOUNT_ID
from localstack.services import generic_proxy
from localstack.services.awslambda import lambda_executors
from localstack.services.awslambda.lambda_executors import (
LAMBDA_RUNTIME_PYTHON27,
LAMBDA_RUNTIME_PYTHON36,
LAMBDA_RUNTIME_NODEJS,
LAMBDA_RUNTIME_NODEJS610,
LAMBDA_RUNTIME_NODEJS810,
LAMBDA_RUNTIME_JAVA8,
LAMBDA_RUNTIME_DOTNETCORE2,
LAMBDA_RUNTIME_DOTNETCORE21,
LAMBDA_RUNTIME_GOLANG,
LAMBDA_RUNTIME_RUBY,
LAMBDA_RUNTIME_RUBY25,
LAMBDA_RUNTIME_CUSTOM_RUNTIME)
from localstack.utils.common import (to_str, load_file, save_file, TMP_FILES, ensure_readable,
mkdir, unzip, is_zip_file, run, short_uid, is_jar_archive, timestamp, TIMESTAMP_FORMAT_MILLIS,
md5, new_tmp_file, parse_chunked_data, is_number, now_utc, safe_requests, isoformat_milliseconds)
from localstack.utils.aws import aws_stack, aws_responses
from localstack.utils.analytics import event_publisher
from localstack.utils.aws.aws_models import LambdaFunction
from localstack.utils.cloudwatch.cloudwatch_util import cloudwatched
APP_NAME = 'lambda_api'
PATH_ROOT = '/2015-03-31'
ARCHIVE_FILE_PATTERN = '%s/lambda.handler.*.jar' % config.TMP_FOLDER
LAMBDA_SCRIPT_PATTERN = '%s/lambda_script_*.py' % config.TMP_FOLDER
# List of Lambda runtime names. Keep them in this list, mainly to silence the linter
LAMBDA_RUNTIMES = [LAMBDA_RUNTIME_PYTHON27, LAMBDA_RUNTIME_PYTHON36,
LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_DOTNETCORE21, LAMBDA_RUNTIME_NODEJS,
LAMBDA_RUNTIME_NODEJS610, LAMBDA_RUNTIME_NODEJS810, LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_RUBY,
LAMBDA_RUNTIME_RUBY25]
# default timeout in seconds
LAMBDA_DEFAULT_TIMEOUT = 3
# default handler and runtime
LAMBDA_DEFAULT_HANDLER = 'handler.handler'
LAMBDA_DEFAULT_RUNTIME = LAMBDA_RUNTIME_PYTHON27
LAMBDA_DEFAULT_STARTING_POSITION = 'LATEST'
LAMBDA_ZIP_FILE_NAME = 'original_lambda_archive.zip'
LAMBDA_JAR_FILE_NAME = 'original_lambda_archive.jar'
app = Flask(APP_NAME)
# map ARN strings to lambda function objects
arn_to_lambda = {}
# list of event source mappings for the API
event_source_mappings = []
# logger
LOG = logging.getLogger(__name__)
# mutex for access to CWD and ENV
exec_mutex = threading.Semaphore(1)
# whether to use Docker for execution
DO_USE_DOCKER = None
# lambda executor instance
LAMBDA_EXECUTOR = lambda_executors.AVAILABLE_EXECUTORS.get(config.LAMBDA_EXECUTOR, lambda_executors.DEFAULT_EXECUTOR)
# IAM version number
IAM_POLICY_VERSION = '2012-10-17'
# Marker name to indicate that a bucket represents the local file system. This is used for testing
# Serverless applications where we mount the Lambda code directly into the container from the host OS.
BUCKET_MARKER_LOCAL = '__local__'
class ClientError(Exception):
    """Exception carrying an HTTP status code and a message (or prebuilt Response)."""

    def __init__(self, msg, code=400):
        super(ClientError, self).__init__(msg)
        self.msg = msg
        self.code = code

    def get_response(self):
        """Return self.msg directly if it is already a Response, else wrap it in an error response."""
        payload = self.msg
        if isinstance(payload, Response):
            return payload
        return error_response(payload, self.code)
class LambdaContext(object):
    """Minimal emulation of the context object AWS Lambda passes to handlers."""

    def __init__(self, func_details, qualifier=None):
        self.function_name = func_details.name()
        self.function_version = func_details.get_qualifier_version(qualifier)
        base_arn = func_details.arn()
        # a qualified invocation carries the qualifier in the invoked ARN
        self.invoked_function_arn = ('%s:%s' % (base_arn, qualifier)) if qualifier else base_arn

    def get_remaining_time_in_millis(self):
        # TODO: report the real remaining execution time; fixed at one minute for now
        return 60 * 1000
def cleanup():
    """Reset all module-level Lambda state and release executor resources.

    Clears the function registry and event source mappings, then asks the
    configured executor to dispose of anything it holds (e.g. containers).
    """
    global event_source_mappings, arn_to_lambda
    arn_to_lambda = {}
    event_source_mappings = []
    LAMBDA_EXECUTOR.cleanup()
def func_arn(function_name):
    """Return the fully qualified Lambda ARN for the given function name."""
    return aws_stack.lambda_function_arn(function_name)
def add_function_mapping(lambda_name, lambda_handler, lambda_cwd=None):
    """Attach the executable handler callable and working dir to the $LATEST version.

    Assumes the function has already been registered in arn_to_lambda.
    """
    arn = func_arn(lambda_name)
    arn_to_lambda[arn].versions.get('$LATEST')['Function'] = lambda_handler
    arn_to_lambda[arn].cwd = lambda_cwd
def add_event_source(function_name, source_arn, enabled):
    """Register a new event source mapping for the given function and return it."""
    # AWS semantics: omitting "Enabled" (None) defaults to an enabled mapping
    state = 'Enabled' if (enabled is True or enabled is None) else 'Disabled'
    new_mapping = {
        'UUID': str(uuid.uuid4()),
        'StateTransitionReason': 'User action',
        'LastModified': float(time.mktime(datetime.utcnow().timetuple())),
        'BatchSize': 100,
        'State': state,
        'FunctionArn': func_arn(function_name),
        'EventSourceArn': source_arn,
        'LastProcessingResult': 'OK',
        'StartingPosition': LAMBDA_DEFAULT_STARTING_POSITION
    }
    event_source_mappings.append(new_mapping)
    return new_mapping
def update_event_source(uuid_value, function_name, enabled, batch_size):
    """Update an existing event source mapping by UUID; returns {} if not found."""
    matching = next((m for m in event_source_mappings if m['UUID'] == uuid_value), None)
    if matching is None:
        return {}
    if function_name:
        matching['FunctionArn'] = func_arn(function_name)
    matching['BatchSize'] = batch_size
    # NOTE: unlike add_event_source, only an explicit True enables the mapping here
    matching['State'] = 'Enabled' if enabled is True else 'Disabled'
    matching['LastModified'] = float(time.mktime(datetime.utcnow().timetuple()))
    return matching
def delete_event_source(uuid_value):
    """Remove and return the event source mapping with the given UUID ({} if absent)."""
    index = next((i for i, m in enumerate(event_source_mappings)
        if m['UUID'] == uuid_value), None)
    return event_source_mappings.pop(index) if index is not None else {}
def use_docker():
    """Determine (once, then cache in DO_USE_DOCKER) whether to run Lambdas in Docker.

    Returns True only if a docker-based executor is configured AND the local
    `docker` CLI is actually usable.
    """
    global DO_USE_DOCKER
    if DO_USE_DOCKER is None:
        DO_USE_DOCKER = False
        if 'docker' in config.LAMBDA_EXECUTOR:
            try:
                # probe the docker daemon; any failure leaves Docker disabled
                run('docker images', print_error=False)
                DO_USE_DOCKER = True
            except Exception:
                pass
    return DO_USE_DOCKER
def process_apigateway_invocation(func_arn, path, payload, headers=None,
        resource_path=None, method=None, path_params=None,
        query_string_params=None, request_context=None):
    """Build an API Gateway proxy integration event and invoke the Lambda.

    Bugfix: the dict parameters previously used mutable default arguments
    ({}), which are shared across calls; they now default to None and are
    normalized to fresh empty dicts inside the function.

    :returns: the Lambda invocation result, or None on error (errors are logged).
    """
    try:
        resource_path = resource_path or path
        event = {
            'path': path,
            'headers': dict(headers or {}),
            'pathParameters': dict(path_params or {}),
            'body': payload,
            'isBase64Encoded': False,
            'resource': resource_path,
            'httpMethod': method,
            'queryStringParameters': query_string_params or {},
            'requestContext': request_context or {},
            'stageVariables': {}  # TODO
        }
        return run_lambda(event=event, context={}, func_arn=func_arn)
    except Exception as e:
        LOG.warning('Unable to run Lambda function on API Gateway message: %s %s' % (e, traceback.format_exc()))
def process_sns_notification(func_arn, topic_arn, subscriptionArn, message, message_attributes, subject=''):
    """Wrap an SNS notification in a Lambda SNS event and invoke asynchronously."""
    try:
        sns_payload = {
            'Type': 'Notification',
            'TopicArn': topic_arn,
            'Subject': subject,
            'Message': message,
            'Timestamp': timestamp(format=TIMESTAMP_FORMAT_MILLIS),
            'MessageAttributes': message_attributes
        }
        record = {
            'EventSource': 'localstack:sns',
            'EventVersion': '1.0',
            'EventSubscriptionArn': subscriptionArn,
            'Sns': sns_payload
        }
        return run_lambda(event={'Records': [record]}, context={},
            func_arn=func_arn, asynchronous=True)
    except Exception as e:
        LOG.warning('Unable to run Lambda function on SNS message: %s %s' % (e, traceback.format_exc()))
def process_kinesis_records(records, stream_name):
    """Forward incoming Kinesis records to all Lambdas listening on the stream."""
    # feed records into listening lambdas
    try:
        stream_arn = aws_stack.kinesis_stream_arn(stream_name)
        for source in get_event_sources(source_arn=stream_arn):
            lambda_arn = source['FunctionArn']
            kinesis_records = [{
                'eventID': 'shardId-000000000000:{0}'.format(rec['sequenceNumber']),
                'eventSourceARN': stream_arn,
                'kinesis': rec
            } for rec in records]
            run_lambda(event={'Records': kinesis_records}, context={}, func_arn=lambda_arn)
    except Exception as e:
        LOG.warning('Unable to run Lambda function on Kinesis records: %s %s' % (e, traceback.format_exc()))
def process_sqs_message(message_body, message_attributes, queue_name, region_name=None):
    """Feed an SQS message into the first Lambda listening on the queue.

    Only the first matching event source mapping is invoked, so each message
    gets processed at most once. Returns True on successful dispatch, None
    when no source matched or an error occurred (errors are logged).
    """
    # feed message into the first listening lambda (message should only get processed once)
    try:
        queue_arn = aws_stack.sqs_queue_arn(queue_name, region_name=region_name)
        sources = get_event_sources(source_arn=queue_arn)
        arns = [s.get('FunctionArn') for s in sources]
        LOG.debug('Found %s source mappings for event from SQS queue %s: %s' % (len(arns), queue_arn, arns))
        source = next(iter(sources), None)
        if source:
            arn = source['FunctionArn']
            # shape mirrors the AWS SQS event record structure
            event = {'Records': [{
                'body': message_body,
                'receiptHandle': 'MessageReceiptHandle',
                'md5OfBody': md5(message_body),
                'eventSourceARN': queue_arn,
                'eventSource': 'aws:sqs',
                'awsRegion': region_name,
                'messageId': str(uuid.uuid4()),
                'attributes': {
                    'ApproximateFirstReceiveTimestamp': '{}000'.format(int(time.time())),
                    'SenderId': TEST_AWS_ACCOUNT_ID,
                    'ApproximateReceiveCount': '1',
                    'SentTimestamp': '{}000'.format(int(time.time()))
                },
                'messageAttributes': message_attributes,
                'sqs': True,
            }]}
            run_lambda(event=event, context={}, func_arn=arn)
            return True
    except Exception as e:
        LOG.warning('Unable to run Lambda function on SQS messages: %s %s' % (e, traceback.format_exc()))
def get_event_sources(func_name=None, source_arn=None):
    """Return all event source mappings matching the given function and/or source ARN prefix."""
    def _matches(mapping):
        if func_name and mapping['FunctionArn'] not in [func_name, func_arn(func_name)]:
            return False
        if source_arn and not mapping['EventSourceArn'].startswith(source_arn):
            return False
        return True
    return [m for m in event_source_mappings if _matches(m)]
def get_function_version(arn, version):
    """Return the formatted API details of the given function version (ARN qualified)."""
    func = arn_to_lambda.get(arn)
    return format_func_details(func, version=version, always_add_version=True)
def publish_new_function_version(arn):
    """Create a new numbered version by snapshotting $LATEST and return its details."""
    func_details = arn_to_lambda.get(arn)
    latest = func_details.versions.get('$LATEST')
    new_version = str(func_details.max_version() + 1)
    func_details.versions[new_version] = {
        'CodeSize': latest.get('CodeSize'),
        'CodeSha256': latest.get('CodeSha256'),
        'Function': latest.get('Function'),
        'RevisionId': str(uuid.uuid4())
    }
    return get_function_version(arn, new_version)
def do_list_versions(arn):
    """Return all versions of the given function, sorted by version string."""
    versions = [get_function_version(arn, v) for v in arn_to_lambda.get(arn).versions]
    versions.sort(key=lambda entry: str(entry.get('Version')))
    return versions
def do_update_alias(arn, alias, version, description=None):
    """Create or overwrite an alias pointing at the given function version."""
    alias_details = {
        'AliasArn': '%s:%s' % (arn, alias),
        'FunctionVersion': version,
        'Name': alias,
        'Description': description or '',
        'RevisionId': str(uuid.uuid4())
    }
    arn_to_lambda.get(arn).aliases[alias] = alias_details
    return alias_details
@cloudwatched('lambda')
def run_lambda(event, context, func_arn, version=None, suppress_output=False, asynchronous=False):
    """Execute the Lambda for the given ARN through the configured executor.

    :param suppress_output: if True, redirect stdout/stderr into an in-memory
        buffer for the duration of the call (restored in the finally block)
    :param asynchronous: passed through to the executor ('Event'-style invocation)
    :returns: the invocation result, or a Flask error response on failure
    """
    if suppress_output:
        stdout_ = sys.stdout
        stderr_ = sys.stderr
        stream = StringIO()
        sys.stdout = stream
        sys.stderr = stream
    try:
        func_arn = aws_stack.fix_arn(func_arn)
        func_details = arn_to_lambda.get(func_arn)
        if not func_details:
            return not_found_error(msg='The resource specified in the request does not exist.')
        if not context:
            context = LambdaContext(func_details, version)
        result, log_output = LAMBDA_EXECUTOR.execute(func_arn, func_details,
            event, context=context, version=version, asynchronous=asynchronous)
    except Exception as e:
        return error_response('Error executing Lambda function %s: %s %s' % (func_arn, e, traceback.format_exc()))
    finally:
        # always restore the real stdout/stderr, even on error
        if suppress_output:
            sys.stdout = stdout_
            sys.stderr = stderr_
    return result
def exec_lambda_code(script, handler_function='handler', lambda_cwd=None, lambda_env=None):
    """Import the given Python Lambda code inline and return its handler function.

    Temporarily switches the process CWD, sys.path and environment variables to
    those of the Lambda while the module is imported; exec_mutex serializes this
    process-global mutation. NOTE(review): if os.chdir/os.environ.update raised,
    the mutex would not be released -- assumed not to fail in practice.
    """
    if lambda_cwd or lambda_env:
        exec_mutex.acquire()
        if lambda_cwd:
            previous_cwd = os.getcwd()
            os.chdir(lambda_cwd)
            sys.path = [lambda_cwd] + sys.path
        if lambda_env:
            previous_env = dict(os.environ)
            os.environ.update(lambda_env)
    # generate lambda file name
    lambda_id = 'l_%s' % short_uid()
    lambda_file = LAMBDA_SCRIPT_PATTERN.replace('*', lambda_id)
    save_file(lambda_file, script)
    # delete temporary .py and .pyc files on exit
    TMP_FILES.append(lambda_file)
    TMP_FILES.append('%sc' % lambda_file)
    try:
        # import the saved script as a module to obtain its handler function
        handler_module = imp.load_source(lambda_id, lambda_file)
        module_vars = handler_module.__dict__
    except Exception as e:
        LOG.error('Unable to exec: %s %s' % (script, traceback.format_exc()))
        raise e
    finally:
        # restore CWD/sys.path/environment in reverse order of the setup above
        if lambda_cwd or lambda_env:
            if lambda_cwd:
                os.chdir(previous_cwd)
                sys.path.pop(0)
            if lambda_env:
                os.environ = previous_env
            exec_mutex.release()
    return module_vars[handler_function]
def get_handler_file_from_name(handler_name, runtime=LAMBDA_DEFAULT_RUNTIME):
    """Map a Lambda handler spec to the file name containing the handler.

    E.g. 'index.handler' -> 'index.py' for python; .NET handlers use ':' as
    separator and map to a .dll assembly; Go binaries have no extension.
    """
    # TODO: support Java Lambdas in the future
    separator = '.'
    if runtime.startswith(LAMBDA_RUNTIME_NODEJS):
        extension = '.js'
    elif runtime.startswith(LAMBDA_RUNTIME_GOLANG):
        extension = ''
    elif runtime.startswith((LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_DOTNETCORE21)):
        extension = '.dll'
        separator = ':'
    elif runtime.startswith(LAMBDA_RUNTIME_RUBY):
        extension = '.rb'
    elif runtime.startswith(LAMBDA_RUNTIME_CUSTOM_RUNTIME):
        extension = '.sh'
    else:
        extension = '.py'
    return handler_name.split(separator)[0] + extension
def get_handler_function_from_name(handler_name, runtime=LAMBDA_DEFAULT_RUNTIME):
    """Extract the handler function name (the last segment) from a handler spec."""
    # TODO: support Java Lambdas in the future
    is_dotnet = runtime.startswith((LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_DOTNETCORE21))
    separator = ':' if is_dotnet else '.'
    return handler_name.split(separator)[-1]
def error_response(msg, code=500, error_type='InternalFailure'):
    """Log the message and return an AWS-style Flask error response."""
    LOG.warning(msg)
    return aws_responses.flask_error_response(msg, code=code, error_type=error_type)
def get_zip_bytes(function_code):
    """Returns the ZIP file contents from a FunctionCode dict.

    :type function_code: dict
    :param function_code: https://docs.aws.amazon.com/lambda/latest/dg/API_FunctionCode.html
    :returns: bytes of the Zip file.
    """
    if 'S3Bucket' in function_code:
        bytes_io = BytesIO()
        s3_client = aws_stack.connect_to_service('s3')
        try:
            s3_client.download_fileobj(function_code['S3Bucket'], function_code['S3Key'], bytes_io)
        except Exception as e:
            raise ClientError('Unable to fetch Lambda archive from S3: %s' % e, 404)
        return bytes_io.getvalue()
    if 'ZipFile' in function_code:
        # inline uploads are base64-encoded
        return base64.b64decode(function_code['ZipFile'])
    raise ClientError('No valid Lambda archive specified.')
def get_java_handler(zip_file_content, handler, main_file):
    """Creates a Java handler from an uploaded ZIP or JAR.

    :type zip_file_content: bytes
    :param zip_file_content: ZIP file bytes.
    :type handler: str
    :param handler: The lambda handler path.
    :type main_file: str
    :param main_file: Filepath to the uploaded ZIP or JAR file.
    :returns: function or flask.Response
    """
    if not is_jar_archive(zip_file_content):
        # ZIP upload: expect exactly one embedded *.jar and extract it into a temp file
        with zipfile.ZipFile(BytesIO(zip_file_content)) as zip_ref:
            jar_entries = [e for e in zip_ref.infolist() if e.filename.endswith('.jar')]
            if len(jar_entries) != 1:
                raise ClientError('Expected exactly one *.jar entry in zip file, found %s' % len(jar_entries))
            zip_file_content = zip_ref.read(jar_entries[0].filename)
            LOG.info('Found jar file %s with %s bytes in Lambda zip archive' %
                (jar_entries[0].filename, len(zip_file_content)))
            main_file = new_tmp_file()
            save_file(main_file, zip_file_content)
    if is_jar_archive(zip_file_content):
        # return a closure that delegates to the local Java executor
        def execute(event, context):
            result, log_output = lambda_executors.EXECUTOR_LOCAL.execute_java_lambda(
                event, context, handler=handler, main_file=main_file)
            return result
        return execute, zip_file_content
    raise ClientError(error_response(
        'Unable to extract Java Lambda handler - file is not a valid zip/jar file', 400, error_type='ValidationError'))
def set_archive_code(code, lambda_name, zip_file_content=None):
    """Store the deployment package for a function; return its working directory.

    For the special S3 bucket marker '__local__' (BUCKET_MARKER_LOCAL) the S3Key
    is interpreted as a path on the local file system and returned directly.
    Also updates CodeSize/CodeSha256 on the $LATEST version.
    """
    # get metadata
    lambda_arn = func_arn(lambda_name)
    lambda_details = arn_to_lambda[lambda_arn]
    is_local_mount = code.get('S3Bucket') == BUCKET_MARKER_LOCAL

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(lambda_arn)

    if is_local_mount:
        # Mount or use a local folder lambda executors can reference
        # WARNING: this means we're pointing lambda_cwd to a local path in the user's
        # file system! We must ensure that there is no data loss (i.e., we must *not* add
        # this folder to TMP_FILES or similar).
        return code['S3Key']

    # get file content
    zip_file_content = zip_file_content or get_zip_bytes(code)

    # Save the zip file to a temporary file that the lambda executors can reference
    code_sha_256 = base64.standard_b64encode(hashlib.sha256(zip_file_content).digest())
    lambda_details.get_version('$LATEST')['CodeSize'] = len(zip_file_content)
    lambda_details.get_version('$LATEST')['CodeSha256'] = code_sha_256.decode('utf-8')
    tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
    mkdir(tmp_dir)
    tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    TMP_FILES.append(tmp_dir)
    lambda_details.cwd = tmp_dir
    return tmp_dir
def set_function_code(code, lambda_name, lambda_cwd=None):
    """Deploy code for a Lambda function and wire up an executable handler.

    Resolves the deployment package (inline zip, S3, or local mount), unpacks
    it, and - depending on the runtime and executor - either prepares a Java
    executor closure or imports the Python handler inline. Raises ClientError
    on invalid archives or missing handler files.
    """
    def generic_handler(event, context):
        # placeholder handler for runtimes that require the Docker executor
        raise ClientError(('Unable to find executor for Lambda function "%s". Note that ' +
            'Node.js, Golang, and .Net Core Lambdas currently require LAMBDA_EXECUTOR=docker') % lambda_name)

    arn = func_arn(lambda_name)
    lambda_details = arn_to_lambda[arn]
    runtime = lambda_details.runtime
    lambda_environment = lambda_details.envvars
    handler_name = lambda_details.handler or LAMBDA_DEFAULT_HANDLER
    code_passed = code
    code = code or lambda_details.code
    is_local_mount = code.get('S3Bucket') == BUCKET_MARKER_LOCAL
    zip_file_content = None

    if code_passed:
        lambda_cwd = lambda_cwd or set_archive_code(code_passed, lambda_name)
        if not is_local_mount:
            # Save the zip file to a temporary file that the lambda executors can reference
            zip_file_content = get_zip_bytes(code_passed)
    else:
        lambda_cwd = lambda_cwd or lambda_details.cwd

    # get local lambda working directory
    tmp_file = '%s/%s' % (lambda_cwd, LAMBDA_ZIP_FILE_NAME)

    if not zip_file_content:
        zip_file_content = load_file(tmp_file, mode='rb')

    # Set the appropriate lambda handler.
    lambda_handler = generic_handler
    if runtime == LAMBDA_RUNTIME_JAVA8:
        # The Lambda executors for Docker subclass LambdaExecutorContainers,
        # which runs Lambda in Docker by passing all *.jar files in the function
        # working directory as part of the classpath. Because of this, we need to
        # save the zip_file_content as a .jar here.
        lambda_handler, zip_file_content = get_java_handler(zip_file_content, handler_name, tmp_file)
        if is_jar_archive(zip_file_content):
            jar_tmp_file = '{working_dir}/{file_name}'.format(
                working_dir=lambda_cwd, file_name=LAMBDA_JAR_FILE_NAME)
            save_file(jar_tmp_file, zip_file_content)
    else:
        handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
        handler_function = get_handler_function_from_name(handler_name, runtime=runtime)

        if not is_local_mount:
            # Lambda code must be uploaded in Zip format
            if not is_zip_file(zip_file_content):
                raise ClientError(
                    'Uploaded Lambda code for runtime ({}) is not in Zip format'.format(runtime))
            unzip(tmp_file, lambda_cwd)

        main_file = '%s/%s' % (lambda_cwd, handler_file)
        if not os.path.exists(main_file):
            # Raise an error if (1) this is not a local mount lambda, or (2) we're
            # running Lambdas locally (not in Docker), or (3) we're using remote Docker.
            # -> We do *not* want to raise an error if we're using local mount in non-remote Docker
            if not is_local_mount or not use_docker() or config.LAMBDA_REMOTE_DOCKER:
                file_list = run('cd "%s"; du -d 3 .' % lambda_cwd)
                config_debug = ('Config for local mount, docker, remote: "%s", "%s", "%s"' %
                    (is_local_mount, use_docker(), config.LAMBDA_REMOTE_DOCKER))
                LOG.debug('Lambda archive content:\n%s' % file_list)
                raise ClientError(error_response(
                    'Unable to find handler script in Lambda archive. %s' % config_debug,
                    400, error_type='ValidationError'))

        if runtime.startswith('python') and not use_docker():
            try:
                # make sure the file is actually readable, then read contents
                ensure_readable(main_file)
                zip_file_content = load_file(main_file, mode='rb')
                # extract handler
                lambda_handler = exec_lambda_code(
                    zip_file_content,
                    handler_function=handler_function,
                    lambda_cwd=lambda_cwd,
                    lambda_env=lambda_environment)
            except Exception as e:
                raise ClientError('Unable to get handler function from lambda code.', e)

    add_function_mapping(lambda_name, lambda_handler, lambda_cwd)
    return {'FunctionName': lambda_name}
def do_list_functions():
    """Return formatted details for all registered Lambda functions."""
    result = []
    for f_arn, func in arn_to_lambda.items():
        if type(func) != LambdaFunction:
            continue
        name = f_arn.split(':function:')[-1]
        details = arn_to_lambda.get(func_arn(name))
        if not details:
            # this can happen if we're accessing Lambdas from a different region (ARN mismatch)
            continue
        result.append(format_func_details(details))
    return result
def format_func_details(func_details, version=None, always_add_version=False):
    """Render a LambdaFunction object as the AWS API response dict for one version."""
    version = version or '$LATEST'
    version_info = func_details.get_version(version)
    result = {
        'CodeSha256': version_info.get('CodeSha256'),
        'Role': func_details.role,
        'Version': version,
        'FunctionArn': func_details.arn(),
        'FunctionName': func_details.name(),
        'CodeSize': version_info.get('CodeSize'),
        'Handler': func_details.handler,
        'Runtime': func_details.runtime,
        'Timeout': func_details.timeout,
        'Description': func_details.description,
        'MemorySize': func_details.memory_size,
        'LastModified': func_details.last_modified,
        'TracingConfig': {'Mode': 'PassThrough'},
        'RevisionId': version_info.get('RevisionId')
    }
    if func_details.envvars:
        result['Environment'] = {'Variables': func_details.envvars}
    # append ':<version>' to the ARN unless it is unqualified $LATEST or already qualified
    needs_qualifier = always_add_version or version != '$LATEST'
    if needs_qualifier and result['FunctionArn'].count(':') <= 6:
        result['FunctionArn'] = '%s:%s' % (result['FunctionArn'], version)
    return result
def forward_to_fallback_url(func_arn, data):
    """ If LAMBDA_FALLBACK_URL is configured, forward the invocation of this non-existing
        Lambda to the configured URL. """
    if not config.LAMBDA_FALLBACK_URL:
        return None
    if config.LAMBDA_FALLBACK_URL.startswith('dynamodb://'):
        # dynamodb://<table> scheme: persist the payload as an item in a DynamoDB table
        table_name = urlparse(config.LAMBDA_FALLBACK_URL.replace('dynamodb://', 'http://')).netloc
        dynamodb = aws_stack.connect_to_service('dynamodb')
        item = {
            'id': {'S': short_uid()},
            'timestamp': {'N': str(now_utc())},
            'payload': {'S': str(data)}
        }
        # ensure the target table exists before writing
        aws_stack.create_dynamodb_table(table_name, partition_key='id')
        dynamodb.put_item(TableName=table_name, Item=item)
        return ''
    if re.match(r'^https?://.+', config.LAMBDA_FALLBACK_URL):
        # plain HTTP(S) URL: POST the payload and return the response body
        response = safe_requests.post(config.LAMBDA_FALLBACK_URL, data)
        return response.content
    raise ClientError('Unexpected value for LAMBDA_FALLBACK_URL: %s' % config.LAMBDA_FALLBACK_URL)
def not_found_error(ref=None, msg=None):
    """Build a 404 ResourceNotFoundException response, optionally naming the missing ref.

    When a ref is given, it takes precedence over any explicit msg.
    """
    if ref:
        kind = 'Function' if ':function:' in ref else 'Resource'
        message = '%s not found: %s' % (kind, ref)
    else:
        message = msg or 'The resource you requested does not exist.'
    return error_response(message, 404, error_type='ResourceNotFoundException')
# ------------
# API METHODS
# ------------
@app.before_request
def before_request():
    """Mark chunked request bodies as terminated so Flask/Werkzeug can read them."""
    # fix to enable chunked encoding, as this is used by some Lambda clients
    transfer_encoding = request.headers.get('Transfer-Encoding', '').lower()
    if transfer_encoding == 'chunked':
        request.environ['wsgi.input_terminated'] = True
@app.route('%s/functions' % PATH_ROOT, methods=['POST'])
def create_function():
    """ Create new function
    ---
    operationId: 'createFunction'
    parameters:
        - name: 'request'
          in: body
    """
    arn = 'n/a'
    try:
        data = json.loads(to_str(request.data))
        lambda_name = data['FunctionName']
        event_publisher.fire_event(event_publisher.EVENT_LAMBDA_CREATE_FUNC,
            payload={'n': event_publisher.get_hash(lambda_name)})
        arn = func_arn(lambda_name)
        if arn in arn_to_lambda:
            return error_response('Function already exist: %s' %
                lambda_name, 409, error_type='ResourceConflictException')
        # register the function, then populate its metadata from the request
        arn_to_lambda[arn] = func_details = LambdaFunction(arn)
        func_details.versions = {'$LATEST': {'RevisionId': str(uuid.uuid4())}}
        func_details.last_modified = isoformat_milliseconds(datetime.utcnow()) + '+0000'
        func_details.description = data.get('Description', '')
        func_details.handler = data['Handler']
        func_details.runtime = data['Runtime']
        func_details.envvars = data.get('Environment', {}).get('Variables', {})
        func_details.tags = data.get('Tags', {})
        func_details.timeout = data.get('Timeout', LAMBDA_DEFAULT_TIMEOUT)
        func_details.role = data['Role']
        func_details.memory_size = data.get('MemorySize')
        func_details.code = data['Code']
        result = set_function_code(func_details.code, lambda_name)
        if isinstance(result, Response):
            # code deployment failed -> roll back the registration
            del arn_to_lambda[arn]
            return result
        # remove content from code attribute, if present
        func_details.code.pop('ZipFile', None)
        # prepare result
        result.update(format_func_details(func_details))
        if data.get('Publish', False):
            result['Version'] = publish_new_function_version(arn)['Version']
        return jsonify(result or {})
    except Exception as e:
        # roll back the (possibly partial) registration on any error
        arn_to_lambda.pop(arn, None)
        if isinstance(e, ClientError):
            return e.get_response()
        return error_response('Unknown error: %s %s' % (e, traceback.format_exc()))
@app.route('%s/functions/<function>' % PATH_ROOT, methods=['GET'])
def get_function(function):
    """ Get details for a single function
    ---
    operationId: 'getFunction'
    parameters:
        - name: 'request'
          in: body
        - name: 'function'
          in: path
    """
    funcs = do_list_functions()
    for func in funcs:
        if func['FunctionName'] == function:
            result = {
                'Configuration': func,
                'Code': {
                    'Location': '%s/code' % request.url
                }
            }
            lambda_details = arn_to_lambda.get(func['FunctionArn'])
            # bugfix: guard against a None lookup (e.g. if the formatted ARN
            # carries a version qualifier) before reading .concurrency
            if lambda_details and lambda_details.concurrency is not None:
                result['Concurrency'] = lambda_details.concurrency
            return jsonify(result)
    return not_found_error(func_arn(function))
@app.route('%s/functions/' % PATH_ROOT, methods=['GET'])
def list_functions():
    """ List functions
    ---
    operationId: 'listFunctions'
    parameters:
        - name: 'request'
          in: body
    """
    return jsonify({'Functions': do_list_functions()})
@app.route('%s/functions/<function>' % PATH_ROOT, methods=['DELETE'])
def delete_function(function):
    """ Delete an existing function
    ---
    operationId: 'deleteFunction'
    parameters:
        - name: 'request'
          in: body
    """
    arn = func_arn(function)

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)

    try:
        arn_to_lambda.pop(arn)
    except KeyError:
        return not_found_error(func_arn(function))

    event_publisher.fire_event(event_publisher.EVENT_LAMBDA_DELETE_FUNC,
        payload={'n': event_publisher.get_hash(function)})

    # drop all event source mappings pointing at the deleted function; filter
    # in place (slice assignment) since other code holds references to this list
    event_source_mappings[:] = [m for m in event_source_mappings
        if m['FunctionArn'] != arn]
    return jsonify({})
@app.route('%s/functions/<function>/code' % PATH_ROOT, methods=['PUT'])
def update_function_code(function):
    """ Update the code of an existing function
    ---
    operationId: 'updateFunctionCode'
    parameters:
        - name: 'request'
          in: body
    """
    data = json.loads(to_str(request.data))
    result = set_function_code(data, function)
    # bugfix: return an error Response as-is BEFORE attempting to merge the
    # function details into it (Response has no .update and must not be mutated)
    if isinstance(result, Response):
        return result
    arn = func_arn(function)
    func_details = arn_to_lambda.get(arn)
    result.update(format_func_details(func_details))
    return jsonify(result or {})
@app.route('%s/functions/<function>/code' % PATH_ROOT, methods=['GET'])
def get_function_code(function):
    """ Get the code of an existing function
    ---
    operationId: 'getFunctionCode'
    parameters:
    """
    arn = func_arn(function)
    func_details = arn_to_lambda.get(arn)
    if not func_details:
        # robustness fix: return a proper 404 instead of a KeyError (HTTP 500)
        return not_found_error(arn)
    tmp_file = '%s/%s' % (func_details.cwd, LAMBDA_ZIP_FILE_NAME)
    return Response(load_file(tmp_file, mode='rb'),
            mimetype='application/zip',
            headers={'Content-Disposition': 'attachment; filename=lambda_archive.zip'})
@app.route('%s/functions/<function>/configuration' % PATH_ROOT, methods=['GET'])
def get_function_configuration(function):
    """ Get the configuration of an existing function
    ---
    operationId: 'getFunctionConfiguration'
    parameters:
    """
    arn = func_arn(function)
    func_details = arn_to_lambda.get(arn)
    if not func_details:
        return not_found_error(arn)
    return jsonify(format_func_details(func_details))
@app.route('%s/functions/<function>/configuration' % PATH_ROOT, methods=['PUT'])
def update_function_configuration(function):
    """ Update the configuration of an existing function
    ---
    operationId: 'updateFunctionConfiguration'
    parameters:
        - name: 'request'
          in: body
    """
    data = json.loads(to_str(request.data))
    arn = func_arn(function)

    # Stop/remove any containers that this arn uses.
    LAMBDA_EXECUTOR.cleanup(arn)

    lambda_details = arn_to_lambda[arn]
    # only overwrite attributes whose keys carry truthy values in the request
    for attr, key in (('handler', 'Handler'), ('runtime', 'Runtime'), ('timeout', 'Timeout')):
        if data.get(key):
            setattr(lambda_details, attr, data[key])
    env_vars = data.get('Environment', {}).get('Variables')
    if env_vars is not None:
        lambda_details.envvars = env_vars
    return jsonify({})
@app.route('%s/functions/<function>/policy' % PATH_ROOT, methods=['POST'])
def add_permission(function):
    """Create an IAM policy granting the requested action on this function."""
    data = json.loads(to_str(request.data))
    iam_client = aws_stack.connect_to_service('iam')
    sid = short_uid()
    statement = {
        'Sid': sid,
        'Effect': 'Allow',
        # TODO: 'Principal' in policies not yet supported in upstream moto
        # 'Principal': data.get('Principal') or {'AWS': TEST_AWS_ACCOUNT_ID},
        'Action': data.get('Action'),
        'Resource': func_arn(function)
    }
    policy = {
        'Version': IAM_POLICY_VERSION,
        'Id': 'LambdaFuncAccess-%s' % sid,
        'Statement': [statement]
    }
    iam_client.create_policy(PolicyName='lambda_policy_%s' % function,
        PolicyDocument=json.dumps(policy), Description='Policy for Lambda function "%s"' % function)
    return jsonify({'Statement': sid})
@app.route('%s/functions/<function>/policy' % PATH_ROOT, methods=['GET'])
def get_policy(function):
    """Return the IAM policy document whose first statement targets this function."""
    iam_client = aws_stack.connect_to_service('iam')
    target_arn = func_arn(function)
    docs = []
    for p in iam_client.list_policies(Scope='Local', MaxItems=500)['Policies']:
        # TODO: Cache policy documents instead of running N+1 API calls here!
        versions = iam_client.list_policy_versions(PolicyArn=p['Arn'])['Versions']
        default_version = [v for v in versions if v.get('IsDefaultVersion')]
        versions = default_version or versions
        doc = versions[0]['Document']
        if not isinstance(doc, dict):
            doc = json.loads(doc)
        # normalize a single statement into a list
        if not isinstance(doc['Statement'], list):
            doc['Statement'] = [doc['Statement']]
        docs.append(doc)
    matching = [d for d in docs if d['Statement'][0]['Resource'] == target_arn]
    if not matching:
        return jsonify({}), 404
    return jsonify({'Policy': matching[0]})
@app.route('%s/functions/<function>/invocations' % PATH_ROOT, methods=['POST'])
def invoke_function(function):
    """ Invoke an existing function
    ---
    operationId: 'invokeFunction'
    parameters:
        - name: 'request'
          in: body
    """
    # function here can either be an arn or a function name
    arn = func_arn(function)

    # arn can also contain a qualifier, extract it from there if so
    m = re.match('(arn:aws:lambda:.*:.*:function:[a-zA-Z0-9-_]+)(:.*)?', arn)
    if m and m.group(2):
        qualifier = m.group(2)[1:]
        arn = m.group(1)
    else:
        qualifier = request.args.get('Qualifier')

    data = request.get_data()
    if data:
        data = to_str(data)
        try:
            data = json.loads(data)
        except Exception:
            try:
                # try to read chunked content
                data = json.loads(parse_chunked_data(data))
            except Exception:
                return error_response('The payload is not JSON: %s' % data, 415,
                    error_type='UnsupportedMediaTypeException')

    # Default invocation type is RequestResponse
    invocation_type = request.environ.get('HTTP_X_AMZ_INVOCATION_TYPE', 'RequestResponse')

    def _create_response(result, status_code=200):
        """ Create the final response for the given invocation result """
        if isinstance(result, Response):
            return result
        details = {
            'StatusCode': status_code,
            'Payload': result,
            'Headers': {}
        }
        if isinstance(result, dict):
            for key in ('StatusCode', 'Payload', 'FunctionError'):
                if result.get(key):
                    details[key] = result[key]
        # Try to parse the payload as JSON
        payload = details['Payload']
        if payload and isinstance(payload, (str, bytes)) and payload[0] in ('[', '{', '"'):
            try:
                details['Payload'] = json.loads(details['Payload'])
            except Exception:
                pass
        # Set error headers
        if details.get('FunctionError'):
            details['Headers']['X-Amz-Function-Error'] = str(details['FunctionError'])
        # Construct response object
        response_obj = details['Payload']
        if isinstance(response_obj, (dict, list, bool)) or is_number(response_obj):
            # Assume this is a JSON response
            response_obj = jsonify(response_obj)
        else:
            response_obj = str(response_obj)
            details['Headers']['Content-Type'] = 'text/plain'
        return response_obj, details['StatusCode'], details['Headers']

    # check if this lambda function exists
    not_found = None
    if arn not in arn_to_lambda:
        not_found = not_found_error(arn)
    elif qualifier and not arn_to_lambda.get(arn).qualifier_exists(qualifier):
        not_found = not_found_error('{0}:{1}'.format(arn, qualifier))

    if not_found:
        # bugfix: pass the resolved ARN string here (previously the func_arn
        # *function object* was passed instead of the local `arn` variable)
        forward_result = forward_to_fallback_url(arn, data)
        if forward_result is not None:
            return _create_response(forward_result)
        return not_found

    if invocation_type == 'RequestResponse':
        result = run_lambda(asynchronous=False, func_arn=arn, event=data, context={}, version=qualifier)
        return _create_response(result)
    elif invocation_type == 'Event':
        run_lambda(asynchronous=True, func_arn=arn, event=data, context={}, version=qualifier)
        return _create_response('', status_code=202)
    elif invocation_type == 'DryRun':
        # Assume the dry run always passes.
        return _create_response('', status_code=204)
    return error_response('Invocation type not one of: RequestResponse, Event or DryRun',
        code=400, error_type='InvalidParameterValueException')
@app.route('%s/event-source-mappings/' % PATH_ROOT, methods=['GET'])
def list_event_source_mappings():
    """ List event source mappings
    ---
    operationId: 'listEventSourceMappings'
    """
    source_arn_filter = request.args.get('EventSourceArn')
    func_name_filter = request.args.get('FunctionName')
    result = event_source_mappings
    # apply the optional query-string filters one after the other
    if source_arn_filter:
        result = [m for m in result if m.get('EventSourceArn') == source_arn_filter]
    if func_name_filter:
        wanted_arn = func_arn(func_name_filter)
        result = [m for m in result if m.get('FunctionArn') == wanted_arn]
    return jsonify({'EventSourceMappings': result})
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['GET'])
def get_event_source_mapping(mapping_uuid):
    """ Get an existing event source mapping
    ---
    operationId: 'getEventSourceMapping'
    parameters:
        - name: 'request'
          in: body
    """
    matches = [m for m in event_source_mappings if m.get('UUID') == mapping_uuid]
    if not matches:
        return not_found_error()
    return jsonify(matches[0])
@app.route('%s/event-source-mappings/' % PATH_ROOT, methods=['POST'])
def create_event_source_mapping():
    """ Create new event source mapping
    ---
    operationId: 'createEventSourceMapping'
    parameters:
        - name: 'request'
          in: body
    """
    params = json.loads(to_str(request.data))
    new_mapping = add_event_source(
        params['FunctionName'], params['EventSourceArn'], params.get('Enabled'))
    return jsonify(new_mapping)
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['PUT'])
def update_event_source_mapping(mapping_uuid):
    """ Update an existing event source mapping
    ---
    operationId: 'updateEventSourceMapping'
    parameters:
        - name: 'request'
          in: body
    """
    params = json.loads(request.data)
    if not mapping_uuid:
        return jsonify({})
    updated = update_event_source(
        mapping_uuid,
        params.get('FunctionName') or '',
        params.get('Enabled', True),
        params.get('BatchSize') or 100)
    return jsonify(updated)
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['DELETE'])
def delete_event_source_mapping(mapping_uuid):
    """ Delete an event source mapping
    ---
    operationId: 'deleteEventSourceMapping'
    """
    if not mapping_uuid:
        return jsonify({})
    return jsonify(delete_event_source(mapping_uuid))
@app.route('%s/functions/<function>/versions' % PATH_ROOT, methods=['POST'])
def publish_version(function):
    """ Publish a new immutable version from the current $LATEST code. """
    target_arn = func_arn(function)
    if target_arn not in arn_to_lambda:
        return not_found_error(target_arn)
    return jsonify(publish_new_function_version(target_arn))
@app.route('%s/functions/<function>/versions' % PATH_ROOT, methods=['GET'])
def list_versions(function):
    """ List all published versions of a function. """
    target_arn = func_arn(function)
    if target_arn not in arn_to_lambda:
        return not_found_error(target_arn)
    return jsonify({'Versions': do_list_versions(target_arn)})
@app.route('%s/functions/<function>/aliases' % PATH_ROOT, methods=['POST'])
def create_alias(function):
    """ Create a new alias for a function.

    Returns 404 if the function does not exist and 409 (ResourceConflictException)
    if an alias with the same name already exists, matching the AWS API.
    """
    arn = func_arn(function)
    if arn not in arn_to_lambda:
        return not_found_error(arn)
    data = json.loads(request.data)
    alias = data.get('Name')
    if alias in arn_to_lambda.get(arn).aliases:
        # bug fix: AWS reports alias name clashes as HTTP 409 Conflict, not 404
        return error_response('Alias already exists: %s' % arn + ':' + alias, 409,
                              error_type='ResourceConflictException')
    version = data.get('FunctionVersion')
    description = data.get('Description')
    return jsonify(do_update_alias(arn, alias, version, description))
@app.route('%s/functions/<function>/aliases/<name>' % PATH_ROOT, methods=['PUT'])
def update_alias(function, name):
    """ Update an existing alias; unspecified fields keep their current values. """
    arn = func_arn(function)
    if arn not in arn_to_lambda:
        return not_found_error(arn)
    aliases = arn_to_lambda.get(arn).aliases
    if name not in aliases:
        return not_found_error(msg='Alias not found: %s:%s' % (arn, name))
    existing = aliases.get(name)
    payload = json.loads(request.data)
    new_version = payload.get('FunctionVersion') or existing.get('FunctionVersion')
    new_description = payload.get('Description') or existing.get('Description')
    return jsonify(do_update_alias(arn, name, new_version, new_description))
@app.route('%s/functions/<function>/aliases/<name>' % PATH_ROOT, methods=['GET'])
def get_alias(function, name):
    """ Return details of a single alias of the given function. """
    arn = func_arn(function)
    if arn not in arn_to_lambda:
        return not_found_error(arn)
    aliases = arn_to_lambda.get(arn).aliases
    if name not in aliases:
        return not_found_error(msg='Alias not found: %s:%s' % (arn, name))
    return jsonify(aliases.get(name))
@app.route('%s/functions/<function>/aliases' % PATH_ROOT, methods=['GET'])
def list_aliases(function):
    """ List all aliases of a function, sorted by alias name. """
    arn = func_arn(function)
    if arn not in arn_to_lambda:
        return not_found_error(arn)
    all_aliases = arn_to_lambda.get(arn).aliases.values()
    return jsonify({'Aliases': sorted(all_aliases, key=lambda a: a['Name'])})
@app.route('/<version>/functions/<function>/concurrency', methods=['PUT'])
def put_concurrency(version, function):
    """ Store the reserved concurrency settings of a function.

    Note: this endpoint lives under a different API version than PATH_ROOT
    (/2017-10-31 at the time of writing), hence the <version> URL segment.
    https://docs.aws.amazon.com/lambda/latest/dg/API_PutFunctionConcurrency.html
    """
    arn = func_arn(function)
    payload = json.loads(request.data)
    details = arn_to_lambda.get(arn)
    if not details:
        return not_found_error(arn)
    details.concurrency = payload
    return jsonify(payload)
@app.route('/<version>/tags/<arn>', methods=['GET'])
def list_tags(version, arn):
    """ Return the tags attached to the function identified by arn. """
    details = arn_to_lambda.get(arn)
    if not details:
        return not_found_error(arn)
    return jsonify({'Tags': details.tags})
@app.route('/<version>/tags/<arn>', methods=['POST'])
def tag_resource(version, arn):
    """ Merge the tags from the request body into the function's tags.

    Returns 404 if tags were supplied but the function does not exist;
    an empty tag set is accepted as a no-op.
    """
    data = json.loads(request.data)
    tags = data.get('Tags', {})
    if tags:
        func_details = arn_to_lambda.get(arn)
        if not func_details:
            return not_found_error(arn)
        # (removed a redundant second `if func_details:` check - the early
        # return above already guarantees func_details is truthy here)
        func_details.tags.update(tags)
    return jsonify({})
@app.route('/<version>/tags/<arn>', methods=['DELETE'])
def untag_resource(version, arn):
    """ Remove the given tag keys from the function; unknown keys are ignored. """
    keys_to_remove = request.args.getlist('tagKeys')
    details = arn_to_lambda.get(arn)
    if not details:
        return not_found_error(arn)
    for key in keys_to_remove:
        details.tags.pop(key, None)
    return jsonify({})
def serve(port, quiet=True):
    """ Start the local Lambda API as a Flask app on the given port. """
    # initialize the Lambda executor
    LAMBDA_EXECUTOR.startup()
    generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)
| 37.657377 | 119 | 0.653346 | import re
import os
import imp
import sys
import json
import uuid
import time
import base64
import logging
import zipfile
import threading
import traceback
import hashlib
from io import BytesIO
from datetime import datetime
from six.moves import cStringIO as StringIO
from six.moves.urllib.parse import urlparse
from flask import Flask, Response, jsonify, request
from localstack import config
from localstack.constants import TEST_AWS_ACCOUNT_ID
from localstack.services import generic_proxy
from localstack.services.awslambda import lambda_executors
from localstack.services.awslambda.lambda_executors import (
LAMBDA_RUNTIME_PYTHON27,
LAMBDA_RUNTIME_PYTHON36,
LAMBDA_RUNTIME_NODEJS,
LAMBDA_RUNTIME_NODEJS610,
LAMBDA_RUNTIME_NODEJS810,
LAMBDA_RUNTIME_JAVA8,
LAMBDA_RUNTIME_DOTNETCORE2,
LAMBDA_RUNTIME_DOTNETCORE21,
LAMBDA_RUNTIME_GOLANG,
LAMBDA_RUNTIME_RUBY,
LAMBDA_RUNTIME_RUBY25,
LAMBDA_RUNTIME_CUSTOM_RUNTIME)
from localstack.utils.common import (to_str, load_file, save_file, TMP_FILES, ensure_readable,
mkdir, unzip, is_zip_file, run, short_uid, is_jar_archive, timestamp, TIMESTAMP_FORMAT_MILLIS,
md5, new_tmp_file, parse_chunked_data, is_number, now_utc, safe_requests, isoformat_milliseconds)
from localstack.utils.aws import aws_stack, aws_responses
from localstack.utils.analytics import event_publisher
from localstack.utils.aws.aws_models import LambdaFunction
from localstack.utils.cloudwatch.cloudwatch_util import cloudwatched
# name of this Flask app / service
APP_NAME = 'lambda_api'
# API version prefix of the AWS Lambda REST API emulated here
PATH_ROOT = '/2015-03-31'
# file patterns for temporary Lambda artifacts
ARCHIVE_FILE_PATTERN = '%s/lambda.handler.*.jar' % config.TMP_FOLDER
LAMBDA_SCRIPT_PATTERN = '%s/lambda_script_*.py' % config.TMP_FOLDER
# list of Lambda runtimes supported by this emulation
LAMBDA_RUNTIMES = [LAMBDA_RUNTIME_PYTHON27, LAMBDA_RUNTIME_PYTHON36,
    LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_DOTNETCORE21, LAMBDA_RUNTIME_NODEJS,
    LAMBDA_RUNTIME_NODEJS610, LAMBDA_RUNTIME_NODEJS810, LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_RUBY,
    LAMBDA_RUNTIME_RUBY25]
# defaults applied when a function is created without these attributes
LAMBDA_DEFAULT_TIMEOUT = 3
LAMBDA_DEFAULT_HANDLER = 'handler.handler'
LAMBDA_DEFAULT_RUNTIME = LAMBDA_RUNTIME_PYTHON27
LAMBDA_DEFAULT_STARTING_POSITION = 'LATEST'
# file names used to persist uploaded archives inside the function's work dir
LAMBDA_ZIP_FILE_NAME = 'original_lambda_archive.zip'
LAMBDA_JAR_FILE_NAME = 'original_lambda_archive.jar'
app = Flask(APP_NAME)
# in-memory state: map of function ARN -> LambdaFunction, and list of mappings
arn_to_lambda = {}
event_source_mappings = []
LOG = logging.getLogger(__name__)
# serializes module imports performed in exec_lambda_code
exec_mutex = threading.Semaphore(1)
# lazily initialized by use_docker(): whether to run Lambdas in Docker
DO_USE_DOCKER = None
LAMBDA_EXECUTOR = lambda_executors.AVAILABLE_EXECUTORS.get(config.LAMBDA_EXECUTOR, lambda_executors.DEFAULT_EXECUTOR)
IAM_POLICY_VERSION = '2012-10-17'
# marker value for 'S3Bucket' that signals a locally mounted code directory
BUCKET_MARKER_LOCAL = '__local__'
class ClientError(Exception):
    """Error carrying a message and an HTTP status code for API error replies."""

    def __init__(self, msg, code=400):
        super(ClientError, self).__init__(msg)
        self.msg = msg
        self.code = code

    def get_response(self):
        """Render this error as a Flask response (a Response msg passes through)."""
        result = self.msg if isinstance(self.msg, Response) else error_response(self.msg, self.code)
        return result
class LambdaContext(object):
    """Minimal stand-in for the AWS Lambda context object passed to handlers."""

    def __init__(self, func_details, qualifier=None):
        self.function_name = func_details.name()
        self.function_version = func_details.get_qualifier_version(qualifier)
        base_arn = func_details.arn()
        # a qualified invocation carries the qualifier as an ARN suffix
        self.invoked_function_arn = base_arn if not qualifier else '%s:%s' % (base_arn, qualifier)

    def get_remaining_time_in_millis(self):
        # fixed one-minute budget; timeouts are not enforced here
        return 1000 * 60
def cleanup():
    """Reset all function/mapping state and release executor resources."""
    global arn_to_lambda, event_source_mappings
    event_source_mappings = []
    arn_to_lambda = {}
    LAMBDA_EXECUTOR.cleanup()
def func_arn(function_name):
    """ Return the fully qualified Lambda ARN for the given function name. """
    return aws_stack.lambda_function_arn(function_name)
def add_function_mapping(lambda_name, lambda_handler, lambda_cwd=None):
    """ Attach the executable handler and working dir to the function's $LATEST version. """
    arn = func_arn(lambda_name)
    arn_to_lambda[arn].versions.get('$LATEST')['Function'] = lambda_handler
    arn_to_lambda[arn].cwd = lambda_cwd
def add_event_source(function_name, source_arn, enabled):
    """Register a new event source mapping and return the created record."""
    # enabled=None (not specified) counts as enabled, like on AWS
    is_on = enabled is True or enabled is None
    new_mapping = {
        'UUID': str(uuid.uuid4()),
        'StateTransitionReason': 'User action',
        'LastModified': float(time.mktime(datetime.utcnow().timetuple())),
        'BatchSize': 100,
        'State': 'Enabled' if is_on else 'Disabled',
        'FunctionArn': func_arn(function_name),
        'EventSourceArn': source_arn,
        'LastProcessingResult': 'OK',
        'StartingPosition': LAMBDA_DEFAULT_STARTING_POSITION
    }
    event_source_mappings.append(new_mapping)
    return new_mapping
def update_event_source(uuid_value, function_name, enabled, batch_size):
    """Modify the mapping with the given UUID in place; return {} if unknown."""
    for mapping in event_source_mappings:
        if mapping['UUID'] != uuid_value:
            continue
        if function_name:
            mapping['FunctionArn'] = func_arn(function_name)
        mapping['BatchSize'] = batch_size
        mapping['State'] = 'Enabled' if enabled is True else 'Disabled'
        mapping['LastModified'] = float(time.mktime(datetime.utcnow().timetuple()))
        return mapping
    return {}
def delete_event_source(uuid_value):
    """Remove and return the mapping with the given UUID, or {} if absent."""
    for idx, mapping in enumerate(event_source_mappings):
        if mapping['UUID'] == uuid_value:
            return event_source_mappings.pop(idx)
    return {}
def use_docker():
    """ Determine (once, then cached in DO_USE_DOCKER) whether Lambdas should
    run inside Docker containers. Requires a docker-based LAMBDA_EXECUTOR
    config AND a working `docker` CLI. """
    global DO_USE_DOCKER
    if DO_USE_DOCKER is None:
        DO_USE_DOCKER = False
        if 'docker' in config.LAMBDA_EXECUTOR:
            try:
                run('docker images', print_error=False)
                DO_USE_DOCKER = True
            except Exception:
                # `docker` CLI unavailable - fall back to local execution
                pass
    return DO_USE_DOCKER
def process_apigateway_invocation(func_arn, path, payload, headers=None,
        resource_path=None, method=None, path_params=None,
        query_string_params=None, request_context=None):
    """ Build an API Gateway proxy event and invoke the Lambda at func_arn.

    Bug fix: the dict parameters previously used mutable default arguments
    ({}), which are shared across calls in Python; they now default to None
    and are normalized to fresh empty dicts per call.
    """
    try:
        headers = {} if headers is None else headers
        path_params = {} if path_params is None else path_params
        query_string_params = {} if query_string_params is None else query_string_params
        request_context = {} if request_context is None else request_context
        resource_path = resource_path or path
        event = {
            'path': path,
            'headers': dict(headers),
            'pathParameters': dict(path_params),
            'body': payload,
            'isBase64Encoded': False,
            'resource': resource_path,
            'httpMethod': method,
            'queryStringParameters': query_string_params,
            'requestContext': request_context,
            'stageVariables': {}
        }
        return run_lambda(event=event, context={}, func_arn=func_arn)
    except Exception as e:
        LOG.warning('Unable to run Lambda function on API Gateway message: %s %s' % (e, traceback.format_exc()))
def process_sns_notification(func_arn, topic_arn, subscriptionArn, message, message_attributes, subject=''):
    """Asynchronously invoke the Lambda at func_arn with an SNS notification event."""
    try:
        sns_record = {
            'EventSource': 'localstack:sns',
            'EventVersion': '1.0',
            'EventSubscriptionArn': subscriptionArn,
            'Sns': {
                'Type': 'Notification',
                'TopicArn': topic_arn,
                'Subject': subject,
                'Message': message,
                'Timestamp': timestamp(format=TIMESTAMP_FORMAT_MILLIS),
                'MessageAttributes': message_attributes
            }
        }
        return run_lambda(event={'Records': [sns_record]}, context={},
            func_arn=func_arn, asynchronous=True)
    except Exception as e:
        LOG.warning('Unable to run Lambda function on SNS message: %s %s' % (e, traceback.format_exc()))
def process_kinesis_records(records, stream_name):
    """Feed the given Kinesis records to every Lambda mapped to the stream."""
    try:
        stream_arn = aws_stack.kinesis_stream_arn(stream_name)
        for source in get_event_sources(source_arn=stream_arn):
            wrapped = [{
                'eventID': 'shardId-000000000000:{0}'.format(rec['sequenceNumber']),
                'eventSourceARN': stream_arn,
                'kinesis': rec
            } for rec in records]
            run_lambda(event={'Records': wrapped}, context={}, func_arn=source['FunctionArn'])
    except Exception as e:
        LOG.warning('Unable to run Lambda function on Kinesis records: %s %s' % (e, traceback.format_exc()))
def process_sqs_message(message_body, message_attributes, queue_name, region_name=None):
    """ Forward an incoming SQS message to the Lambda mapped to the queue.

    Builds an SQS event record and invokes the function synchronously.
    Returns True after a successful invocation attempt; returns None when no
    mapping exists or an error occurred (only logged, never raised).
    """
    try:
        queue_arn = aws_stack.sqs_queue_arn(queue_name, region_name=region_name)
        sources = get_event_sources(source_arn=queue_arn)
        arns = [s.get('FunctionArn') for s in sources]
        LOG.debug('Found %s source mappings for event from SQS queue %s: %s' % (len(arns), queue_arn, arns))
        # NOTE(review): only the FIRST matching mapping is invoked, even if
        # several mappings reference this queue - confirm this is intended
        source = next(iter(sources), None)
        if source:
            arn = source['FunctionArn']
            event = {'Records': [{
                'body': message_body,
                'receiptHandle': 'MessageReceiptHandle',
                'md5OfBody': md5(message_body),
                'eventSourceARN': queue_arn,
                'eventSource': 'aws:sqs',
                'awsRegion': region_name,
                'messageId': str(uuid.uuid4()),
                'attributes': {
                    'ApproximateFirstReceiveTimestamp': '{}000'.format(int(time.time())),
                    'SenderId': TEST_AWS_ACCOUNT_ID,
                    'ApproximateReceiveCount': '1',
                    'SentTimestamp': '{}000'.format(int(time.time()))
                },
                'messageAttributes': message_attributes,
                'sqs': True,
            }]}
            run_lambda(event=event, context={}, func_arn=arn)
            return True
    except Exception as e:
        LOG.warning('Unable to run Lambda function on SQS messages: %s %s' % (e, traceback.format_exc()))
def get_event_sources(func_name=None, source_arn=None):
    """Return mappings filtered by function name/ARN and event source ARN prefix."""
    matches = []
    for mapping in event_source_mappings:
        func_ok = not func_name or mapping['FunctionArn'] in [func_name, func_arn(func_name)]
        if func_ok and (not source_arn or mapping['EventSourceArn'].startswith(source_arn)):
            matches.append(mapping)
    return matches
def get_function_version(arn, version):
    """ Return the formatted details of the given function version,
    with the version always appended to the ARN. """
    func = arn_to_lambda.get(arn)
    return format_func_details(func, version=version, always_add_version=True)
def publish_new_function_version(arn):
    """Snapshot $LATEST into the next numbered version and return its details."""
    func_details = arn_to_lambda.get(arn)
    versions = func_details.versions
    latest = versions.get('$LATEST')
    new_version = str(func_details.max_version() + 1)
    versions[new_version] = {
        'CodeSize': latest.get('CodeSize'),
        'CodeSha256': latest.get('CodeSha256'),
        'Function': latest.get('Function'),
        'RevisionId': str(uuid.uuid4())
    }
    return get_function_version(arn, new_version)
def do_list_versions(arn):
    """Return details of all versions of the function, sorted by version string."""
    versions = [get_function_version(arn, v) for v in arn_to_lambda.get(arn).versions.keys()]
    return sorted(versions, key=lambda entry: str(entry.get('Version')))
def do_update_alias(arn, alias, version, description=None):
    """Create or overwrite an alias entry on the function and return it."""
    entry = {
        'AliasArn': '%s:%s' % (arn, alias),
        'FunctionVersion': version,
        'Name': alias,
        'Description': description or '',
        'RevisionId': str(uuid.uuid4())
    }
    arn_to_lambda.get(arn).aliases[alias] = entry
    return entry
@cloudwatched('lambda')
def run_lambda(event, context, func_arn, version=None, suppress_output=False, asynchronous=False):
    """ Execute the Lambda registered under func_arn via the configured executor.

    Returns the executor result, a not-found response if the function does
    not exist, or an error response on execution failure. When
    suppress_output is set, stdout/stderr are captured for the duration of
    the call and restored afterwards.
    """
    if suppress_output:
        stdout_ = sys.stdout
        stderr_ = sys.stderr
        stream = StringIO()
        sys.stdout = stream
        sys.stderr = stream
    try:
        func_arn = aws_stack.fix_arn(func_arn)
        func_details = arn_to_lambda.get(func_arn)
        if not func_details:
            return not_found_error(msg='The resource specified in the request does not exist.')
        if not context:
            # create a default context when the caller did not supply one
            context = LambdaContext(func_details, version)
        result, log_output = LAMBDA_EXECUTOR.execute(func_arn, func_details,
            event, context=context, version=version, asynchronous=asynchronous)
    except Exception as e:
        return error_response('Error executing Lambda function %s: %s %s' % (func_arn, e, traceback.format_exc()))
    finally:
        # always restore the original stdout/stderr, even on early return
        if suppress_output:
            sys.stdout = stdout_
            sys.stderr = stderr_
    return result
def exec_lambda_code(script, handler_function='handler', lambda_cwd=None, lambda_env=None):
    """ Import the given Lambda source code as a module and return its handler.

    When lambda_cwd/lambda_env are given, cwd, sys.path and os.environ are
    temporarily modified while the module is loaded, serialized via exec_mutex.
    Raises on import failure (after logging the error).
    """
    if lambda_cwd or lambda_env:
        exec_mutex.acquire()
        if lambda_cwd:
            previous_cwd = os.getcwd()
            os.chdir(lambda_cwd)
            sys.path = [lambda_cwd] + sys.path
        if lambda_env:
            previous_env = dict(os.environ)
            os.environ.update(lambda_env)
    # persist the script to a temp file so imp.load_source can import it
    lambda_id = 'l_%s' % short_uid()
    lambda_file = LAMBDA_SCRIPT_PATTERN.replace('*', lambda_id)
    save_file(lambda_file, script)
    # register the script (and its .pyc) for cleanup on shutdown
    TMP_FILES.append(lambda_file)
    TMP_FILES.append('%sc' % lambda_file)
    try:
        handler_module = imp.load_source(lambda_id, lambda_file)
        module_vars = handler_module.__dict__
    except Exception as e:
        LOG.error('Unable to exec: %s %s' % (script, traceback.format_exc()))
        raise e
    finally:
        # NOTE(review): if save_file above raises, the mutex acquired earlier
        # is never released, since the try block starts after it - confirm
        if lambda_cwd or lambda_env:
            if lambda_cwd:
                os.chdir(previous_cwd)
                sys.path.pop(0)
            if lambda_env:
                os.environ = previous_env
            exec_mutex.release()
    return module_vars[handler_function]
def get_handler_file_from_name(handler_name, runtime=LAMBDA_DEFAULT_RUNTIME):
    """Map a handler identifier to the source file name, based on the runtime."""
    separator = '.'
    if runtime.startswith(LAMBDA_RUNTIME_NODEJS):
        extension = '.js'
    elif runtime.startswith(LAMBDA_RUNTIME_GOLANG):
        extension = ''
    elif runtime.startswith((LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_DOTNETCORE21)):
        # .NET handlers use 'assembly:namespace.class::method' notation
        extension = '.dll'
        separator = ':'
    elif runtime.startswith(LAMBDA_RUNTIME_RUBY):
        extension = '.rb'
    elif runtime.startswith(LAMBDA_RUNTIME_CUSTOM_RUNTIME):
        extension = '.sh'
    else:
        extension = '.py'
    return handler_name.split(separator)[0] + extension
def get_handler_function_from_name(handler_name, runtime=LAMBDA_DEFAULT_RUNTIME):
    """Extract the function/method part from a handler identifier."""
    is_dotnet = runtime.startswith((LAMBDA_RUNTIME_DOTNETCORE2, LAMBDA_RUNTIME_DOTNETCORE21))
    separator = ':' if is_dotnet else '.'
    return handler_name.split(separator)[-1]
def error_response(msg, code=500, error_type='InternalFailure'):
    """ Log the message and return a Flask error response with the given
    HTTP status code and AWS error type. """
    LOG.warning(msg)
    return aws_responses.flask_error_response(msg, code=code, error_type=error_type)
def get_zip_bytes(function_code):
    """Return the raw zip bytes for a function's Code spec.

    Supports an S3 reference (S3Bucket/S3Key) or an inline base64 'ZipFile'.
    Raises ClientError when the archive cannot be obtained.
    """
    if 'S3Bucket' in function_code:
        bytes_io = BytesIO()
        s3_client = aws_stack.connect_to_service('s3')
        try:
            s3_client.download_fileobj(function_code['S3Bucket'], function_code['S3Key'], bytes_io)
        except Exception as e:
            raise ClientError('Unable to fetch Lambda archive from S3: %s' % e, 404)
        return bytes_io.getvalue()
    if 'ZipFile' in function_code:
        return base64.b64decode(function_code['ZipFile'])
    raise ClientError('No valid Lambda archive specified.')
def get_java_handler(zip_file_content, handler, main_file):
    """ Obtain a handler function for a Java Lambda archive.

    If the upload is not itself a jar, it must contain exactly one *.jar
    entry, which is extracted to a fresh temp file. Returns a tuple of
    (execute callable, jar bytes); raises ClientError for invalid archives.
    """
    if not is_jar_archive(zip_file_content):
        with zipfile.ZipFile(BytesIO(zip_file_content)) as zip_ref:
            jar_entries = [e for e in zip_ref.infolist() if e.filename.endswith('.jar')]
            if len(jar_entries) != 1:
                raise ClientError('Expected exactly one *.jar entry in zip file, found %s' % len(jar_entries))
            zip_file_content = zip_ref.read(jar_entries[0].filename)
            LOG.info('Found jar file %s with %s bytes in Lambda zip archive' %
                (jar_entries[0].filename, len(zip_file_content)))
            main_file = new_tmp_file()
            save_file(main_file, zip_file_content)
    if is_jar_archive(zip_file_content):
        def execute(event, context):
            # delegate to the local Java executor using the extracted jar
            result, log_output = lambda_executors.EXECUTOR_LOCAL.execute_java_lambda(
                event, context, handler=handler, main_file=main_file)
            return result
        return execute, zip_file_content
    raise ClientError(error_response(
        'Unable to extract Java Lambda handler - file is not a valid zip/jar file', 400, error_type='ValidationError'))
def set_archive_code(code, lambda_name, zip_file_content=None):
    """ Persist the zip archive for a function and return its working directory.

    For locally mounted code (S3Bucket == BUCKET_MARKER_LOCAL), S3Key is the
    host path and nothing is persisted. Otherwise the zip bytes are written
    under a fresh temp dir and CodeSize/CodeSha256 of $LATEST are updated.
    """
    lambda_arn = func_arn(lambda_name)
    lambda_details = arn_to_lambda[lambda_arn]
    is_local_mount = code.get('S3Bucket') == BUCKET_MARKER_LOCAL
    # drop cached executor resources belonging to the previous code version
    LAMBDA_EXECUTOR.cleanup(lambda_arn)
    if is_local_mount:
        return code['S3Key']
    zip_file_content = zip_file_content or get_zip_bytes(code)
    code_sha_256 = base64.standard_b64encode(hashlib.sha256(zip_file_content).digest())
    lambda_details.get_version('$LATEST')['CodeSize'] = len(zip_file_content)
    lambda_details.get_version('$LATEST')['CodeSha256'] = code_sha_256.decode('utf-8')
    tmp_dir = '%s/zipfile.%s' % (config.TMP_FOLDER, short_uid())
    mkdir(tmp_dir)
    tmp_file = '%s/%s' % (tmp_dir, LAMBDA_ZIP_FILE_NAME)
    save_file(tmp_file, zip_file_content)
    TMP_FILES.append(tmp_dir)
    lambda_details.cwd = tmp_dir
    return tmp_dir
def set_function_code(code, lambda_name, lambda_cwd=None):
    """ Deploy the given code for a function: persist the archive, unpack it,
    and (for locally executed runtimes) resolve the handler callable.

    `code` may be None/empty to re-deploy the function's stored code spec.
    Returns {'FunctionName': ...} on success; raises ClientError on invalid
    archives or missing handler files.
    """
    def generic_handler(event, context):
        # placeholder handler for runtimes that require the Docker executor
        raise ClientError(('Unable to find executor for Lambda function "%s". Note that ' +
            'Node.js, Golang, and .Net Core Lambdas currently require LAMBDA_EXECUTOR=docker') % lambda_name)
    arn = func_arn(lambda_name)
    lambda_details = arn_to_lambda[arn]
    runtime = lambda_details.runtime
    lambda_environment = lambda_details.envvars
    handler_name = lambda_details.handler or LAMBDA_DEFAULT_HANDLER
    code_passed = code
    code = code or lambda_details.code
    is_local_mount = code.get('S3Bucket') == BUCKET_MARKER_LOCAL
    zip_file_content = None
    if code_passed:
        lambda_cwd = lambda_cwd or set_archive_code(code_passed, lambda_name)
        if not is_local_mount:
            # Lambda code must be uploaded in the Zip format
            zip_file_content = get_zip_bytes(code_passed)
    else:
        lambda_cwd = lambda_cwd or lambda_details.cwd
    tmp_file = '%s/%s' % (lambda_cwd, LAMBDA_ZIP_FILE_NAME)
    if not zip_file_content:
        zip_file_content = load_file(tmp_file, mode='rb')
    lambda_handler = generic_handler
    if runtime == LAMBDA_RUNTIME_JAVA8:
        lambda_handler, zip_file_content = get_java_handler(zip_file_content, handler_name, tmp_file)
        if is_jar_archive(zip_file_content):
            jar_tmp_file = '{working_dir}/{file_name}'.format(
                working_dir=lambda_cwd, file_name=LAMBDA_JAR_FILE_NAME)
            save_file(jar_tmp_file, zip_file_content)
    else:
        handler_file = get_handler_file_from_name(handler_name, runtime=runtime)
        handler_function = get_handler_function_from_name(handler_name, runtime=runtime)
        if not is_local_mount:
            if not is_zip_file(zip_file_content):
                raise ClientError(
                    'Uploaded Lambda code for runtime ({}) is not in Zip format'.format(runtime))
            unzip(tmp_file, lambda_cwd)
        main_file = '%s/%s' % (lambda_cwd, handler_file)
        if not os.path.exists(main_file):
            # The handler file is only required when (1) the code is not a local
            # mount, (2) we're running Lambdas locally (not in Docker), or (3) we're using remote Docker.
            if not is_local_mount or not use_docker() or config.LAMBDA_REMOTE_DOCKER:
                file_list = run('cd "%s"; du -d 3 .' % lambda_cwd)
                config_debug = ('Config for local mount, docker, remote: "%s", "%s", "%s"' %
                    (is_local_mount, use_docker(), config.LAMBDA_REMOTE_DOCKER))
                LOG.debug('Lambda archive content:\n%s' % file_list)
                raise ClientError(error_response(
                    'Unable to find handler script in Lambda archive. %s' % config_debug,
                    400, error_type='ValidationError'))
        if runtime.startswith('python') and not use_docker():
            try:
                # make sure the file is actually readable, then read contents
                ensure_readable(main_file)
                zip_file_content = load_file(main_file, mode='rb')
                # extract handler
                lambda_handler = exec_lambda_code(
                    zip_file_content,
                    handler_function=handler_function,
                    lambda_cwd=lambda_cwd,
                    lambda_env=lambda_environment)
            except Exception as e:
                # NOTE(review): `e` is passed as ClientError's `code` argument
                # here, which expects an int HTTP status - confirm intent
                raise ClientError('Unable to get handler function from lambda code.', e)
    add_function_mapping(lambda_name, lambda_handler, lambda_cwd)
    return {'FunctionName': lambda_name}
def do_list_functions():
    """ Return formatted details for all Lambda functions registered in this region. """
    funcs = []
    for f_arn, func in arn_to_lambda.items():
        # skip non-function entries; isinstance instead of `type(...) !=`
        # is the idiomatic check and also accepts LambdaFunction subclasses
        if not isinstance(func, LambdaFunction):
            continue
        func_name = f_arn.split(':function:')[-1]
        arn = func_arn(func_name)
        func_details = arn_to_lambda.get(arn)
        if not func_details:
            # this can happen if we're accessing Lambdas from a different region (ARN mismatch)
            continue
        funcs.append(format_func_details(func_details))
    return funcs
def format_func_details(func_details, version=None, always_add_version=False):
    """ Render a LambdaFunction (for the given version, default '$LATEST')
    as an AWS 'FunctionConfiguration'-style dict. """
    version = version or '$LATEST'
    func_version = func_details.get_version(version)
    result = {
        'CodeSha256': func_version.get('CodeSha256'),
        'Role': func_details.role,
        'Version': version,
        'FunctionArn': func_details.arn(),
        'FunctionName': func_details.name(),
        'CodeSize': func_version.get('CodeSize'),
        'Handler': func_details.handler,
        'Runtime': func_details.runtime,
        'Timeout': func_details.timeout,
        'Description': func_details.description,
        'MemorySize': func_details.memory_size,
        'LastModified': func_details.last_modified,
        'TracingConfig': {'Mode': 'PassThrough'},
        'RevisionId': func_version.get('RevisionId')
    }
    if func_details.envvars:
        result['Environment'] = {
            'Variables': func_details.envvars
        }
    # append ':<version>' to the ARN unless it already carries a qualifier
    if (always_add_version or version != '$LATEST') and len(result['FunctionArn'].split(':')) <= 7:
        result['FunctionArn'] += ':%s' % (version)
    return result
def forward_to_fallback_url(func_arn, data):
    """ Forward the invocation of a non-existing Lambda per LAMBDA_FALLBACK_URL.

    Supported targets: 'dynamodb://<table>' (record the payload as an item)
    or an http(s) URL (POST the payload). Returns None when no fallback URL
    is configured; raises ClientError on an unsupported URL scheme.
    """
    if not config.LAMBDA_FALLBACK_URL:
        return None
    if config.LAMBDA_FALLBACK_URL.startswith('dynamodb://'):
        # reuse urlparse by mapping the dynamodb:// scheme onto http://
        table_name = urlparse(config.LAMBDA_FALLBACK_URL.replace('dynamodb://', 'http://')).netloc
        dynamodb = aws_stack.connect_to_service('dynamodb')
        item = {
            'id': {'S': short_uid()},
            'timestamp': {'N': str(now_utc())},
            'payload': {'S': str(data)}
        }
        # create the table on demand
        aws_stack.create_dynamodb_table(table_name, partition_key='id')
        dynamodb.put_item(TableName=table_name, Item=item)
        return ''
    if re.match(r'^https?://.+', config.LAMBDA_FALLBACK_URL):
        response = safe_requests.post(config.LAMBDA_FALLBACK_URL, data)
        return response.content
    raise ClientError('Unexpected value for LAMBDA_FALLBACK_URL: %s' % config.LAMBDA_FALLBACK_URL)
def not_found_error(ref=None, msg=None):
    """Return a 404 ResourceNotFoundException response for the given reference."""
    if ref:
        # a supplied reference always determines the message
        kind = 'Function' if ':function:' in ref else 'Resource'
        msg = '%s not found: %s' % (kind, ref)
    elif not msg:
        msg = 'The resource you requested does not exist.'
    return error_response(msg, 404, error_type='ResourceNotFoundException')
@app.before_request
def before_request():
    """ Support chunked transfer encoding: mark the WSGI input stream as
    terminated so Flask can read chunked request bodies. """
    transfer_encoding = request.headers.get('Transfer-Encoding', '').lower()
    if transfer_encoding == 'chunked':
        request.environ['wsgi.input_terminated'] = True
@app.route('%s/functions' % PATH_ROOT, methods=['POST'])
def create_function():
    """ Create new function
    ---
    operationId: 'createFunction'
    parameters:
        - name: 'request'
          in: body
    """
    arn = 'n/a'
    try:
        data = json.loads(to_str(request.data))
        lambda_name = data['FunctionName']
        event_publisher.fire_event(event_publisher.EVENT_LAMBDA_CREATE_FUNC,
            payload={'n': event_publisher.get_hash(lambda_name)})
        arn = func_arn(lambda_name)
        if arn in arn_to_lambda:
            return error_response('Function already exist: %s' %
                lambda_name, 409, error_type='ResourceConflictException')
        arn_to_lambda[arn] = func_details = LambdaFunction(arn)
        func_details.versions = {'$LATEST': {'RevisionId': str(uuid.uuid4())}}
        func_details.last_modified = isoformat_milliseconds(datetime.utcnow()) + '+0000'
        func_details.description = data.get('Description', '')
        func_details.handler = data['Handler']
        func_details.runtime = data['Runtime']
        func_details.envvars = data.get('Environment', {}).get('Variables', {})
        func_details.tags = data.get('Tags', {})
        func_details.timeout = data.get('Timeout', LAMBDA_DEFAULT_TIMEOUT)
        func_details.role = data['Role']
        func_details.memory_size = data.get('MemorySize')
        func_details.code = data['Code']
        result = set_function_code(func_details.code, lambda_name)
        if isinstance(result, Response):
            # code deployment failed - roll back the registration
            del arn_to_lambda[arn]
            return result
        # remove the inline zip payload from the stored code spec
        func_details.code.pop('ZipFile', None)
        result.update(format_func_details(func_details))
        if data.get('Publish', False):
            result['Version'] = publish_new_function_version(arn)['Version']
        return jsonify(result or {})
    except Exception as e:
        # roll back the (possibly partial) registration on any failure
        arn_to_lambda.pop(arn, None)
        if isinstance(e, ClientError):
            return e.get_response()
        return error_response('Unknown error: %s %s' % (e, traceback.format_exc()))
@app.route('%s/functions/<function>' % PATH_ROOT, methods=['GET'])
def get_function(function):
    """ Return details (configuration, code location, concurrency) of a function. """
    for func in do_list_functions():
        if func['FunctionName'] != function:
            continue
        result = {
            'Configuration': func,
            'Code': {
                'Location': '%s/code' % request.url
            }
        }
        lambda_details = arn_to_lambda.get(func['FunctionArn'])
        if lambda_details.concurrency is not None:
            result['Concurrency'] = lambda_details.concurrency
        return jsonify(result)
    return not_found_error(func_arn(function))
@app.route('%s/functions/' % PATH_ROOT, methods=['GET'])
def list_functions():
    """ List all registered Lambda functions. """
    return jsonify({'Functions': do_list_functions()})
@app.route('%s/functions/<function>' % PATH_ROOT, methods=['DELETE'])
def delete_function(function):
    """ Delete a function, its executor resources, and any mappings to it. """
    arn = func_arn(function)
    # shut down containers/resources used by this function first
    LAMBDA_EXECUTOR.cleanup(arn)
    try:
        arn_to_lambda.pop(arn)
    except KeyError:
        return not_found_error(func_arn(function))
    event_publisher.fire_event(event_publisher.EVENT_LAMBDA_DELETE_FUNC,
        payload={'n': event_publisher.get_hash(function)})
    # drop all event source mappings pointing at the deleted function;
    # filter in place, since other code holds a reference to this list
    event_source_mappings[:] = [m for m in event_source_mappings if m['FunctionArn'] != arn]
    return jsonify({})
@app.route('%s/functions/<function>/code' % PATH_ROOT, methods=['PUT'])
def update_function_code(function):
    """ Deploy new code for an existing function and return its details.

    Bug fix: the Response check must happen BEFORE result.update(...) -
    previously a failed deployment returned a flask Response, and calling
    dict.update() on it raised an AttributeError instead of returning the
    error to the client.
    """
    data = json.loads(to_str(request.data))
    result = set_function_code(data, function)
    if isinstance(result, Response):
        return result
    arn = func_arn(function)
    func_details = arn_to_lambda.get(arn)
    result.update(format_func_details(func_details))
    return jsonify(result or {})
@app.route('%s/functions/<function>/code' % PATH_ROOT, methods=['GET'])
def get_function_code(function):
    """ Download the stored zip archive of a function as an attachment. """
    lambda_cwd = arn_to_lambda[func_arn(function)].cwd
    archive_path = '%s/%s' % (lambda_cwd, LAMBDA_ZIP_FILE_NAME)
    attachment = {'Content-Disposition': 'attachment; filename=lambda_archive.zip'}
    return Response(load_file(archive_path, mode='rb'),
                    mimetype='application/zip', headers=attachment)
@app.route('%s/functions/<function>/configuration' % PATH_ROOT, methods=['GET'])
def get_function_configuration(function):
    """ Return the configuration details of a function. """
    arn = func_arn(function)
    details = arn_to_lambda.get(arn)
    if not details:
        return not_found_error(arn)
    return jsonify(format_func_details(details))
@app.route('%s/functions/<function>/configuration' % PATH_ROOT, methods=['PUT'])
def update_function_configuration(function):
    """ Update handler/runtime/timeout/environment of an existing function. """
    data = json.loads(to_str(request.data))
    arn = func_arn(function)
    # invalidate executor resources, since the config change may affect them
    LAMBDA_EXECUTOR.cleanup(arn)
    lambda_details = arn_to_lambda[arn]
    for attr, key in (('handler', 'Handler'), ('runtime', 'Runtime'), ('timeout', 'Timeout')):
        if data.get(key):
            setattr(lambda_details, attr, data[key])
    env_vars = data.get('Environment', {}).get('Variables')
    if env_vars is not None:
        lambda_details.envvars = env_vars
    return jsonify({})
@app.route('%s/functions/<function>/policy' % PATH_ROOT, methods=['POST'])
def add_permission(function):
    """ Add a resource permission statement for the function, stored as a
    local IAM policy. Returns the new statement id. """
    data = json.loads(to_str(request.data))
    iam_client = aws_stack.connect_to_service('iam')
    sid = short_uid()
    policy = {
        'Version': IAM_POLICY_VERSION,
        'Id': 'LambdaFuncAccess-%s' % sid,
        'Statement': [{
            'Sid': sid,
            'Effect': 'Allow',
            'Action': data.get('Action'),
            'Resource': func_arn(function)
        }]
    }
    iam_client.create_policy(PolicyName='lambda_policy_%s' % function,
        PolicyDocument=json.dumps(policy), Description='Policy for Lambda function "%s"' % function)
    result = {'Statement': sid}
    return jsonify(result)
@app.route('%s/functions/<function>/policy' % PATH_ROOT, methods=['GET'])
def get_policy(function):
    """ Return the resource policy of the function by scanning all local IAM
    policies for one whose first statement targets this function's ARN. """
    iam_client = aws_stack.connect_to_service('iam')
    policies = iam_client.list_policies(Scope='Local', MaxItems=500)['Policies']
    docs = []
    for p in policies:
        # prefer the default policy version when one is flagged
        versions = iam_client.list_policy_versions(PolicyArn=p['Arn'])['Versions']
        default_version = [v for v in versions if v.get('IsDefaultVersion')]
        versions = default_version or versions
        doc = versions[0]['Document']
        doc = doc if isinstance(doc, dict) else json.loads(doc)
        # normalize 'Statement' to always be a list
        if not isinstance(doc['Statement'], list):
            doc['Statement'] = [doc['Statement']]
        docs.append(doc)
    policy = [d for d in docs if d['Statement'][0]['Resource'] == func_arn(function)]
    if not policy:
        return jsonify({}), 404
    return jsonify({'Policy': policy[0]})
@app.route('%s/functions/<function>/invocations' % PATH_ROOT, methods=['POST'])
def invoke_function(function):
    """Invoke the given Lambda function.

    Handles the three invocation types: 'RequestResponse' (synchronous),
    'Event' (asynchronous, returns 202) and 'DryRun' (returns 204). A
    version/alias qualifier may be given either inside the function ARN or
    via the 'Qualifier' query parameter.
    """
    # Split a qualified ARN into the plain function ARN and the qualifier.
    arn = func_arn(function)
    m = re.match('(arn:aws:lambda:.*:.*:function:[a-zA-Z0-9-_]+)(:.*)?', arn)
    if m and m.group(2):
        qualifier = m.group(2)[1:]
        arn = m.group(1)
    else:
        qualifier = request.args.get('Qualifier')

    # Parse the request payload as JSON; as a fallback, try undoing HTTP
    # chunked transfer encoding first.
    data = request.get_data()
    if data:
        data = to_str(data)
        try:
            data = json.loads(data)
        except Exception:
            try:
                data = json.loads(parse_chunked_data(data))
            except Exception:
                return error_response('The payload is not JSON: %s' % data, 415,
                    error_type='UnsupportedMediaTypeException')

    invocation_type = request.environ.get('HTTP_X_AMZ_INVOCATION_TYPE', 'RequestResponse')

    def _create_response(result, status_code=200):
        """Build an HTTP response triple from a Lambda invocation result."""
        if isinstance(result, Response):
            return result
        details = {
            'StatusCode': status_code,
            'Payload': result,
            'Headers': {}
        }
        if isinstance(result, dict):
            for key in ('StatusCode', 'Payload', 'FunctionError'):
                if result.get(key):
                    details[key] = result[key]
        # If the payload looks like JSON, parse it so it can be re-serialized.
        payload = details['Payload']
        if payload and isinstance(payload, (str, bytes)) and payload[0] in ('[', '{', '"'):
            try:
                details['Payload'] = json.loads(details['Payload'])
            except Exception:
                pass
        # Set error headers
        if details.get('FunctionError'):
            details['Headers']['X-Amz-Function-Error'] = str(details['FunctionError'])
        # Construct response object
        response_obj = details['Payload']
        if isinstance(response_obj, (dict, list, bool)) or is_number(response_obj):
            # Assume this is a JSON response
            response_obj = jsonify(response_obj)
        else:
            response_obj = str(response_obj)
            details['Headers']['Content-Type'] = 'text/plain'
        return response_obj, details['StatusCode'], details['Headers']

    # check if this lambda function exists
    not_found = None
    if arn not in arn_to_lambda:
        not_found = not_found_error(arn)
    elif qualifier and not arn_to_lambda.get(arn).qualifier_exists(qualifier):
        not_found = not_found_error('{0}:{1}'.format(arn, qualifier))

    if not_found:
        # Bug fix: pass the resolved ARN string to the fallback handler --
        # previously the `func_arn` helper function itself was passed here.
        forward_result = forward_to_fallback_url(arn, data)
        if forward_result is not None:
            return _create_response(forward_result)
        return not_found

    if invocation_type == 'RequestResponse':
        result = run_lambda(asynchronous=False, func_arn=arn, event=data, context={}, version=qualifier)
        return _create_response(result)
    elif invocation_type == 'Event':
        run_lambda(asynchronous=True, func_arn=arn, event=data, context={}, version=qualifier)
        return _create_response('', status_code=202)
    elif invocation_type == 'DryRun':
        # Assume the dry run always passes.
        return _create_response('', status_code=204)
    return error_response('Invocation type not one of: RequestResponse, Event or DryRun',
        code=400, error_type='InvalidParameterValueException')
@app.route('%s/event-source-mappings/' % PATH_ROOT, methods=['GET'])
def list_event_source_mappings():
    """List event source mappings, optionally filtered by source ARN and/or function name."""
    source_arn_filter = request.args.get('EventSourceArn')
    function_name_filter = request.args.get('FunctionName')
    result = event_source_mappings
    if source_arn_filter:
        result = [m for m in result if m.get('EventSourceArn') == source_arn_filter]
    if function_name_filter:
        # The filter parameter carries a function name; compare by its ARN.
        target_arn = func_arn(function_name_filter)
        result = [m for m in result if m.get('FunctionArn') == target_arn]
    return jsonify({'EventSourceMappings': result})
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['GET'])
def get_event_source_mapping(mapping_uuid):
    """Return the event source mapping with the given UUID (404 if unknown)."""
    matches = [m for m in event_source_mappings if m.get('UUID') == mapping_uuid]
    if not matches:
        return not_found_error()
    return jsonify(matches[0])
@app.route('%s/event-source-mappings/' % PATH_ROOT, methods=['POST'])
def create_event_source_mapping():
    """Create a mapping between an event source and a Lambda function."""
    params = json.loads(to_str(request.data))
    mapping = add_event_source(
        params['FunctionName'], params['EventSourceArn'], params.get('Enabled'))
    return jsonify(mapping)
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['PUT'])
def update_event_source_mapping(mapping_uuid):
    """Update an existing event source mapping identified by its UUID."""
    params = json.loads(request.data)
    if not mapping_uuid:
        return jsonify({})
    # Fields that are absent (or falsy) fall back to defaults.
    name = params.get('FunctionName') or ''
    is_enabled = params.get('Enabled', True)
    batch = params.get('BatchSize') or 100
    return jsonify(update_event_source(mapping_uuid, name, is_enabled, batch))
@app.route('%s/event-source-mappings/<mapping_uuid>' % PATH_ROOT, methods=['DELETE'])
def delete_event_source_mapping(mapping_uuid):
    """Delete the event source mapping with the given UUID and return it."""
    if not mapping_uuid:
        return jsonify({})
    return jsonify(delete_event_source(mapping_uuid))
@app.route('%s/functions/<function>/versions' % PATH_ROOT, methods=['POST'])
def publish_version(function):
    """Publish a new version of the given Lambda function (404 if unknown)."""
    arn = func_arn(function)
    if arn in arn_to_lambda:
        return jsonify(publish_new_function_version(arn))
    return not_found_error(arn)
@app.route('%s/functions/<function>/versions' % PATH_ROOT, methods=['GET'])
def list_versions(function):
    """List all published versions of the given Lambda function (404 if unknown)."""
    arn = func_arn(function)
    if arn in arn_to_lambda:
        return jsonify({'Versions': do_list_versions(arn)})
    return not_found_error(arn)
@app.route('%s/functions/<function>/aliases' % PATH_ROOT, methods=['POST'])
def create_alias(function):
    """Create a new alias for the given Lambda function.

    Fails with a ResourceConflictException when the alias already exists.
    """
    arn = func_arn(function)
    if arn not in arn_to_lambda:
        return not_found_error(arn)
    data = json.loads(request.data)
    alias = data.get('Name')
    if alias in arn_to_lambda.get(arn).aliases:
        # NOTE(review): AWS documents HTTP 409 for ResourceConflictException;
        # 404 is kept here to avoid changing existing behavior -- confirm.
        return error_response('Alias already exists: %s' % arn + ':' + alias, 404,
                              error_type='ResourceConflictException')
    version = data.get('FunctionVersion')
    description = data.get('Description')
    return jsonify(do_update_alias(arn, alias, version, description))
@app.route('%s/functions/<function>/aliases/<name>' % PATH_ROOT, methods=['PUT'])
def update_alias(function, name):
    """Update an existing alias, keeping current values for omitted fields."""
    arn = func_arn(function)
    if arn not in arn_to_lambda:
        return not_found_error(arn)
    aliases = arn_to_lambda.get(arn).aliases
    if name not in aliases:
        return not_found_error(msg='Alias not found: %s:%s' % (arn, name))
    current = aliases.get(name)
    payload = json.loads(request.data)
    # Fall back to the currently stored values for fields not supplied.
    new_version = payload.get('FunctionVersion') or current.get('FunctionVersion')
    new_description = payload.get('Description') or current.get('Description')
    return jsonify(do_update_alias(arn, name, new_version, new_description))
@app.route('%s/functions/<function>/aliases/<name>' % PATH_ROOT, methods=['GET'])
def get_alias(function, name):
    """Return the given alias of a Lambda function (404 if either is unknown)."""
    arn = func_arn(function)
    if arn not in arn_to_lambda:
        return not_found_error(arn)
    aliases = arn_to_lambda.get(arn).aliases
    if name not in aliases:
        return not_found_error(msg='Alias not found: %s:%s' % (arn, name))
    return jsonify(aliases.get(name))
@app.route('%s/functions/<function>/aliases' % PATH_ROOT, methods=['GET'])
def list_aliases(function):
    """List all aliases of the given function, sorted by alias name."""
    arn = func_arn(function)
    if arn not in arn_to_lambda:
        return not_found_error(arn)
    sorted_aliases = sorted(arn_to_lambda.get(arn).aliases.values(),
                            key=lambda alias: alias['Name'])
    return jsonify({'Aliases': sorted_aliases})
@app.route('/<version>/functions/<function>/concurrency', methods=['PUT'])
def put_concurrency(version, function):
    """Store the concurrency configuration for the given Lambda function."""
    # the version for put_concurrency != PATH_ROOT, at the time of this
    # writing it's: /2017-10-31 for this endpoint
    # https://docs.aws.amazon.com/lambda/latest/dg/API_PutFunctionConcurrency.html
    arn = func_arn(function)
    # The parsed request body is stored on the function details as-is and
    # echoed back to the caller.
    data = json.loads(request.data)
    lambda_details = arn_to_lambda.get(arn)
    if not lambda_details:
        return not_found_error(arn)
    lambda_details.concurrency = data
    return jsonify(data)
@app.route('/<version>/tags/<arn>', methods=['GET'])
def list_tags(version, arn):
    """Return the tags of the function identified by the given ARN."""
    func_details = arn_to_lambda.get(arn)
    if not func_details:
        return not_found_error(arn)
    return jsonify({'Tags': func_details.tags})
@app.route('/<version>/tags/<arn>', methods=['POST'])
def tag_resource(version, arn):
    """Add the given tags to the function identified by the ARN.

    An empty/missing tag map is a no-op (no existence check is performed in
    that case, matching the previous behavior); otherwise a 404 is returned
    for unknown ARNs.
    """
    data = json.loads(request.data)
    tags = data.get('Tags', {})
    if tags:
        func_details = arn_to_lambda.get(arn)
        if not func_details:
            return not_found_error(arn)
        # Removed a redundant `if func_details:` re-check -- the guard above
        # already guarantees the function exists at this point.
        func_details.tags.update(tags)
    return jsonify({})
@app.route('/<version>/tags/<arn>', methods=['DELETE'])
def untag_resource(version, arn):
    """Remove the given tag keys from the function identified by the ARN."""
    func_details = arn_to_lambda.get(arn)
    if not func_details:
        return not_found_error(arn)
    for key in request.args.getlist('tagKeys'):
        # Keys that are not present are silently ignored.
        func_details.tags.pop(key, None)
    return jsonify({})
def serve(port, quiet=True):
    """Start the local Lambda API server on the given port.

    Boots the Lambda executor first, then serves the Flask app.
    """
    # initialize the Lambda executor
    LAMBDA_EXECUTOR.startup()
    generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)
| true | true |
f73af6ddf0f40d493b96b19dc9ea9cef14e39447 | 604 | py | Python | kharidari/shop/migrations/0001_initial.py | priyanshgupta1998/Kharidari--A-Shopping-Site | f0fb74e7bf4de8677c23548e51fc45109195c7ff | [
"MIT"
] | null | null | null | kharidari/shop/migrations/0001_initial.py | priyanshgupta1998/Kharidari--A-Shopping-Site | f0fb74e7bf4de8677c23548e51fc45109195c7ff | [
"MIT"
] | null | null | null | kharidari/shop/migrations/0001_initial.py | priyanshgupta1998/Kharidari--A-Shopping-Site | f0fb74e7bf4de8677c23548e51fc45109195c7ff | [
"MIT"
] | null | null | null | # Generated by Django 2.2.4 on 2021-01-02 08:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Product table."""

    # First migration of this app.
    initial = True
    dependencies = [
    ]
    operations = [
        # Product: a named item with a description and a publication date.
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('product_name', models.CharField(max_length=50)),
                ('desc', models.CharField(max_length=300)),
                ('pub_date', models.DateField()),
            ],
        ),
    ]
| 25.166667 | 114 | 0.566225 |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Product table."""

    # First migration of this app.
    initial = True
    dependencies = [
    ]
    operations = [
        # Product: a named item with a description and a publication date.
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('product_name', models.CharField(max_length=50)),
                ('desc', models.CharField(max_length=300)),
                ('pub_date', models.DateField()),
            ],
        ),
    ]
| true | true |
f73af75e17d128397d1226fe21905f18f55d224c | 2,456 | py | Python | surfaces/trianglesDrawing.py | massimo-nocentini/cagd | baec0824951ebc17e23e16e71339dd8fd79b11c2 | [
"MIT"
] | null | null | null | surfaces/trianglesDrawing.py | massimo-nocentini/cagd | baec0824951ebc17e23e16e71339dd8fd79b11c2 | [
"MIT"
] | null | null | null | surfaces/trianglesDrawing.py | massimo-nocentini/cagd | baec0824951ebc17e23e16e71339dd8fd79b11c2 | [
"MIT"
] | null | null | null |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import trianglesCore as tc
def draw(*surfaces, figure_size_tuple=(15,15)):
    """Render one or more triangulated surface patches in a single 3D figure.

    Args:
        *surfaces: (surface, triangles) pairs, or -- as a convenience -- a
            single surface followed by its triangle index array.
        figure_size_tuple: (width, height) of the matplotlib figure.

    Returns:
        The created (figure, axes) pair.
    """
    sizex, sizey = figure_size_tuple
    matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
    # necessary adjustment if `draw` is used for only one patch.
    # Fix: use `==` instead of `is` -- identity comparison of ints relies on
    # CPython's small-int cache and raises a SyntaxWarning on Python >= 3.8.
    if len(surfaces) == 2 and not isinstance(surfaces[0], tuple):
        surface, triangles = surfaces
        surfaces = [(surface, triangles)]
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection='3d')
    for surface, triangles in surfaces:
        x, y, z = surface[0,:],surface[1,:],surface[2,:]
        ax.plot_trisurf(x, y, z,
                        triangles=triangles, cmap=plt.cm.Spectral)#, edgecolor='none')
    return fig, ax
def draw_repeated_degree_elevation(
    control_net, snapshots=None, degrees=None, formatting_string="Order {}:"):
    """Repeatedly degree-elevate a control net, plotting it at chosen steps.

    Args:
        control_net: an (order, control_net) pair as used by trianglesCore.
        snapshots: if given, number of log-spaced snapshots to plot across
            100 elevation steps.
        degrees: alternatively, an explicit list of degrees at which to plot.
        formatting_string: header printed before each plot; pass False to
            suppress the header.

    Returns:
        The final (order, control_net) pair after all elevations.
    """
    order, control_net = control_net
    if snapshots:
        def drawer(print_handler):
            nonlocal order, control_net
            runs = 2
            # Log-spaced snapshot positions within [1, 10**runs].
            snapshots_list = [int(np.ceil(l)) for l in np.logspace(0,runs,num=snapshots)]
            s = 0
            for i in range(1, (10**runs)+1):
                order, control_net = tc.degree_elevation(order, control_net)
                if i == snapshots_list[s]:
                    print_handler(order)
                    s += 1
    elif degrees:
        def drawer(print_handler):
            nonlocal order, control_net, degrees
            degrees = sorted(degrees)
            for d, degree in enumerate(degrees):
                if degree > order-1: break
            for i in range(max(degrees)+1):
                order, control_net = tc.degree_elevation(order, control_net)
                # Fix: use `==` instead of `is` -- integer identity is a
                # CPython implementation detail and fails for values outside
                # the small-int cache.
                if order-1 == degrees[d]:
                    print_handler(order)
                    d += 1
                    if d == len(degrees): break
    # NOTE(review): if neither `snapshots` nor `degrees` is given, `drawer`
    # is never defined and the call below raises NameError -- confirm whether
    # that case should be guarded.
    def print_handler(order):
        if formatting_string is not False: print(formatting_string.format(order))
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        xs = control_net[0,:]
        ys = control_net[1,:]
        zs = control_net[2,:]
        ax.scatter(xs, ys, zs, c='r', marker='o')
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('z')
        plt.show()
    drawer(print_handler) # finally draw some pictures
    return order, control_net
| 29.590361 | 89 | 0.586726 |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import trianglesCore as tc
def draw(*surfaces, figure_size_tuple=(15,15)):
    """Render one or more triangulated surface patches in a single 3D figure.

    Accepts (surface, triangles) pairs, or a single surface followed by its
    triangle index array; returns the created (figure, axes) pair.
    """
    sizex, sizey = figure_size_tuple
    matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
    # Convenience form: a bare (surface, triangles) pair is wrapped in a list.
    # NOTE(review): `is 2` compares int identity; should be `== 2`.
    if len(surfaces) is 2 and not isinstance(surfaces[0], tuple):
        surface, triangles = surfaces
        surfaces = [(surface, triangles)]
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection='3d')
    for surface, triangles in surfaces:
        x, y, z = surface[0,:],surface[1,:],surface[2,:]
        ax.plot_trisurf(x, y, z,
                        triangles=triangles, cmap=plt.cm.Spectral)
    return fig, ax
def draw_repeated_degree_elevation(
    control_net, snapshots=None, degrees=None, formatting_string="Order {}:"):
    """Repeatedly degree-elevate a control net, plotting it at chosen steps.

    `control_net` is an (order, control_net) pair; either `snapshots`
    (log-spaced plot count over 100 steps) or `degrees` (explicit degrees to
    plot) selects when plots are produced. Returns the final pair.
    """
    order, control_net = control_net
    if snapshots:
        def drawer(print_handler):
            nonlocal order, control_net
            runs = 2
            # Log-spaced snapshot positions within [1, 10**runs].
            snapshots_list = [int(np.ceil(l)) for l in np.logspace(0,runs,num=snapshots)]
            s = 0
            for i in range(1, (10**runs)+1):
                order, control_net = tc.degree_elevation(order, control_net)
                if i == snapshots_list[s]:
                    print_handler(order)
                    s += 1
    elif degrees:
        def drawer(print_handler):
            nonlocal order, control_net, degrees
            degrees = sorted(degrees)
            for d, degree in enumerate(degrees):
                if degree > order-1: break
            for i in range(max(degrees)+1):
                order, control_net = tc.degree_elevation(order, control_net)
                # NOTE(review): `is` compares int identity; should be `==`.
                if order-1 is degrees[d]:
                    print_handler(order)
                    d += 1
                    if d == len(degrees): break
    # NOTE(review): if neither `snapshots` nor `degrees` is given, `drawer`
    # is undefined and the call below raises NameError.
    def print_handler(order):
        if formatting_string is not False: print(formatting_string.format(order))
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        xs = control_net[0,:]
        ys = control_net[1,:]
        zs = control_net[2,:]
        ax.scatter(xs, ys, zs, c='r', marker='o')
        ax.set_xlabel('x')
        ax.set_ylabel('y')
        ax.set_zlabel('z')
        plt.show()
    drawer(print_handler)
    return order, control_net
| true | true |
f73af773ca483fd0145380bad75f59373e203edf | 1,110 | py | Python | snips_nlu/__about__.py | theHazzard/snips-nlu | 52714e902bb7baeed4f95bca16719606e6fac5cc | [
"Apache-2.0"
] | 1 | 2020-01-10T00:36:30.000Z | 2020-01-10T00:36:30.000Z | snips_nlu/__about__.py | theHazzard/snips-nlu | 52714e902bb7baeed4f95bca16719606e6fac5cc | [
"Apache-2.0"
] | null | null | null | snips_nlu/__about__.py | theHazzard/snips-nlu | 52714e902bb7baeed4f95bca16719606e6fac5cc | [
"Apache-2.0"
] | null | null | null | # inspired from:
# https://python-packaging-user-guide.readthedocs.io/guides/single-sourcing-package-version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py
# pylint:disable=line-too-long
# Package identity and authorship metadata.
__title__ = "snips_nlu"
__summary__ = "Snips Natural Language Understanding library"
__github_url__ = "https://github.com/snipsco/snips-nlu"
__doc_url__ = "https://snips-nlu.readthedocs.io"
__tracker_url__ = "https://github.com/snipsco/snips-nlu/issues"
__author__ = "Clement Doumouro, Adrien Ball"
__email__ = "clement.doumouro@snips.ai, adrien.ball@snips.ai"
__license__ = "Apache License, Version 2.0"
# Library version and the model format version it produces/loads.
__version__ = "0.20.1"
__model_version__ = "0.20.0"
# Remote endpoints for downloadable language resources.
__download_url__ = "https://github.com/snipsco/snips-nlu-language-resources/releases/download"
__compatibility__ = "https://raw.githubusercontent.com/snipsco/snips-nlu-language-resources/master/compatibility.json"
__shortcuts__ = "https://raw.githubusercontent.com/snipsco/snips-nlu-language-resources/master/shortcuts.json"
__entities_download_url__ = "https://resources.snips.ai/nlu/gazetteer-entities"
# pylint:enable=line-too-long
| 42.692308 | 118 | 0.791892 |
# Package identity and authorship metadata.
__title__ = "snips_nlu"
__summary__ = "Snips Natural Language Understanding library"
__github_url__ = "https://github.com/snipsco/snips-nlu"
__doc_url__ = "https://snips-nlu.readthedocs.io"
__tracker_url__ = "https://github.com/snipsco/snips-nlu/issues"
__author__ = "Clement Doumouro, Adrien Ball"
__email__ = "clement.doumouro@snips.ai, adrien.ball@snips.ai"
__license__ = "Apache License, Version 2.0"
# Library version and the model format version it produces/loads.
__version__ = "0.20.1"
__model_version__ = "0.20.0"
# Remote endpoints for downloadable language resources.
__download_url__ = "https://github.com/snipsco/snips-nlu-language-resources/releases/download"
__compatibility__ = "https://raw.githubusercontent.com/snipsco/snips-nlu-language-resources/master/compatibility.json"
__shortcuts__ = "https://raw.githubusercontent.com/snipsco/snips-nlu-language-resources/master/shortcuts.json"
__entities_download_url__ = "https://resources.snips.ai/nlu/gazetteer-entities"
| true | true |
f73af796a6da1263483c84ff8775622f02baa36f | 595 | py | Python | blogging_api/models/tag.py | mustafmst/plain-blog | 12a575e9b343c404a64c6a50847e45b9b8680938 | [
"MIT"
] | null | null | null | blogging_api/models/tag.py | mustafmst/plain-blog | 12a575e9b343c404a64c6a50847e45b9b8680938 | [
"MIT"
] | 5 | 2021-03-10T06:39:20.000Z | 2021-09-22T18:39:19.000Z | blogging_api/models/tag.py | mustafmst/plain-blog | 12a575e9b343c404a64c6a50847e45b9b8680938 | [
"MIT"
] | 1 | 2020-03-08T12:20:43.000Z | 2020-03-08T12:20:43.000Z | from django.db import models
from rest_framework import serializers
class Tag(models.Model):
    """
    Post tag model
    """
    # The tag name doubles as the primary key, so tag names are unique.
    name = models.CharField(max_length=50, unique=True, primary_key=True)
    # NOTE(review): presumably marks tags used internally by the backend --
    # confirm the intended semantics.
    backend_tag = models.BooleanField(default=False)
    def __str__(self):
        # Tags display as their name (admin, shell, templates).
        return self.name
class TagSerializer(serializers.Serializer):
    """
    REST API serializer for Tag model
    """
    name = serializers.CharField()
    def update(self, instance, validated_data):
        # Updating tags through this serializer is not implemented; no-op.
        pass
    def create(self, validated_data):
        # Persist a new Tag row from the validated payload.
        return Tag.objects.create(**validated_data)
| 22.037037 | 73 | 0.685714 | from django.db import models
from rest_framework import serializers
class Tag(models.Model):
    """Post tag model."""
    # The tag name doubles as the primary key, so tag names are unique.
    name = models.CharField(max_length=50, unique=True, primary_key=True)
    # NOTE(review): presumably marks tags used internally by the backend --
    # confirm the intended semantics.
    backend_tag = models.BooleanField(default=False)
    def __str__(self):
        # Tags display as their name (admin, shell, templates).
        return self.name
class TagSerializer(serializers.Serializer):
    """REST API serializer for the Tag model."""
    name = serializers.CharField()
    def update(self, instance, validated_data):
        # Updating tags through this serializer is not implemented; no-op.
        pass
    def create(self, validated_data):
        # Persist a new Tag row from the validated payload.
        return Tag.objects.create(**validated_data)
| true | true |
f73af7e5966b3ded656c0e5c48f5c329fb3a9f53 | 272 | py | Python | appli/daemon/fourcheball_daemon.py | kasshyss/fourcheball | 12f0312eb32cc2fb247d8a5d8fb6b0b7ea195a32 | [
"MIT"
] | null | null | null | appli/daemon/fourcheball_daemon.py | kasshyss/fourcheball | 12f0312eb32cc2fb247d8a5d8fb6b0b7ea195a32 | [
"MIT"
] | null | null | null | appli/daemon/fourcheball_daemon.py | kasshyss/fourcheball | 12f0312eb32cc2fb247d8a5d8fb6b0b7ea195a32 | [
"MIT"
] | null | null | null |
import pdb
import logging
import m_conf as conf
from time import sleep  # fix: `sleep` was called below but never imported (NameError)

# Configure logging: root handler via basicConfig, plus a dedicated daemon
# logger whose verbosity comes from the application configuration.
logging.basicConfig()
logger = logging.getLogger('fourcheball_daemon')
logger.setLevel(conf.get_logger_level())
logger.info('Init daemon')

# Main daemon loop: log a heartbeat, wait for operator input, then sleep.
while True:
    logger.debug('Run 1')
    # NOTE(review): `raw_input` exists only on Python 2; switch to `input()`
    # if this daemon is meant to run under Python 3.
    raw_input() # dev purpose
    sleep(3)
| 17 | 48 | 0.738971 |
import pdb
import logging
import m_conf as conf
# Configure logging: root handler via basicConfig, plus a dedicated daemon
# logger whose verbosity comes from the application configuration.
logging.basicConfig()
logger = logging.getLogger('fourcheball_daemon')
logger.setLevel(conf.get_logger_level())
logger.info('Init daemon')
# Main daemon loop: log a heartbeat, wait for input, then sleep.
while True:
    logger.debug('Run 1')
    # NOTE(review): `raw_input` is Python 2 only, and `sleep` is never
    # imported here (NameError at runtime) -- needs `from time import sleep`.
    raw_input()
    sleep(3)
| true | true |
f73af8f755cc898881f10347c9b52721483b2dc7 | 2,285 | py | Python | cohesity_management_sdk/models/virtual_disk_id.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | cohesity_management_sdk/models/virtual_disk_id.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | cohesity_management_sdk/models/virtual_disk_id.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class VirtualDiskId(object):
    """Model describing the identity of a virtual disk.

    Captures the disk uuid together with its placement on the virtual
    hardware: controller type, controller bus number and unit number.
    """

    # Maps Python attribute names to the property names used by the API.
    _names = {
        "bus_number":'busNumber',
        "controller_type":'controllerType',
        "disk_id":'diskId',
        "unit_number":'unitNumber'
    }

    def __init__(self, bus_number=None, controller_type=None, disk_id=None,
                 unit_number=None):
        """Initialize a VirtualDiskId with the given (optional) fields."""
        self.bus_number = bus_number
        self.controller_type = controller_type
        self.disk_id = disk_id
        self.unit_number = unit_number

    @classmethod
    def from_dictionary(cls, dictionary):
        """Build a VirtualDiskId from a deserialized API response dictionary.

        Args:
            dictionary (dict): keys must match the API property names
                ('busNumber', 'controllerType', 'diskId', 'unitNumber').

        Returns:
            VirtualDiskId: populated instance, or None when no input given.
        """
        if dictionary is None:
            return None
        # Missing keys simply become None attribute values.
        return cls(bus_number=dictionary.get('busNumber'),
                   controller_type=dictionary.get('controllerType'),
                   disk_id=dictionary.get('diskId'),
                   unit_number=dictionary.get('unitNumber'))
| 30.466667 | 81 | 0.609628 |
class VirtualDiskId(object):
    """Identity of a virtual disk: uuid plus controller type/bus/unit placement."""
    # Maps Python attribute names to the property names used by the API.
    _names = {
        "bus_number":'busNumber',
        "controller_type":'controllerType',
        "disk_id":'diskId',
        "unit_number":'unitNumber'
    }
    def __init__(self,
                 bus_number=None,
                 controller_type=None,
                 disk_id=None,
                 unit_number=None):
        """Initialize a VirtualDiskId with the given (optional) fields."""
        self.bus_number = bus_number
        self.controller_type = controller_type
        self.disk_id = disk_id
        self.unit_number = unit_number
    @classmethod
    def from_dictionary(cls,
                        dictionary):
        """Build a VirtualDiskId from a deserialized API response dict.

        Keys must match the API property names; missing keys become None
        attributes, and a None input yields None.
        """
        if dictionary is None:
            return None
        bus_number = dictionary.get('busNumber')
        controller_type = dictionary.get('controllerType')
        disk_id = dictionary.get('diskId')
        unit_number = dictionary.get('unitNumber')
        return cls(bus_number,
                   controller_type,
                   disk_id,
                   unit_number)
| true | true |
f73afa5f7826feefd827315c4f21dca04103fde3 | 279 | py | Python | accounts/urls.py | kekeho/nitnc-cancel-notification | ba8bd9f3ed6b7c831b244d6d1cff537f72bf5057 | [
"MIT"
] | 1 | 2018-12-05T13:35:17.000Z | 2018-12-05T13:35:17.000Z | accounts/urls.py | kekeho/nitnc-cancel-notification | ba8bd9f3ed6b7c831b244d6d1cff537f72bf5057 | [
"MIT"
] | 3 | 2020-02-11T23:33:44.000Z | 2021-06-10T21:04:08.000Z | accounts/urls.py | kekeho/NITNC-Cancel-Notification | ba8bd9f3ed6b7c831b244d6d1cff537f72bf5057 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# Account-related routes: registration, session management and user profile.
urlpatterns = [
    path('signup/', views.signup, name='signup'),
    path('login/', views.login_func, name='login'),
    path('logout/', views.logout_func, name='logout'),
    path('profile/', views.profile, name='profile'),
]
| 25.363636 | 54 | 0.659498 | from django.urls import path
from . import views
# Account-related routes: registration, session management and user profile.
urlpatterns = [
    path('signup/', views.signup, name='signup'),
    path('login/', views.login_func, name='login'),
    path('logout/', views.logout_func, name='logout'),
    path('profile/', views.profile, name='profile'),
]
| true | true |
f73afb0ad5aec3ffcd5866b9178b6da1669735eb | 381 | py | Python | reviewboard/scmtools/evolutions/repository_access_control.py | pombredanne/reviewboard | 15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d | [
"MIT"
] | null | null | null | reviewboard/scmtools/evolutions/repository_access_control.py | pombredanne/reviewboard | 15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d | [
"MIT"
] | null | null | null | reviewboard/scmtools/evolutions/repository_access_control.py | pombredanne/reviewboard | 15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d | [
"MIT"
] | null | null | null | from django_evolution.mutations import AddField
from django.db import models
# Django Evolution mutations adding access-control fields to Repository:
# a public flag plus the review groups and users granted access.
MUTATIONS = [
    AddField('Repository', 'review_groups', models.ManyToManyField,
             related_model='reviews.Group'),
    AddField('Repository', 'public', models.BooleanField, initial=True),
    AddField('Repository', 'users', models.ManyToManyField,
             related_model='auth.User')
]
| 31.75 | 72 | 0.711286 | from django_evolution.mutations import AddField
from django.db import models
# Django Evolution mutations adding access-control fields to Repository:
# a public flag plus the review groups and users granted access.
MUTATIONS = [
    AddField('Repository', 'review_groups', models.ManyToManyField,
             related_model='reviews.Group'),
    AddField('Repository', 'public', models.BooleanField, initial=True),
    AddField('Repository', 'users', models.ManyToManyField,
             related_model='auth.User')
]
| true | true |
f73afb40d706a15d509b322556bb628eaa1ba16e | 233 | py | Python | backend/base/serializers.py | mazin123100/ecommerce-app | d93926316cc7ba7647d02e43d4dd065184394966 | [
"MIT"
] | null | null | null | backend/base/serializers.py | mazin123100/ecommerce-app | d93926316cc7ba7647d02e43d4dd065184394966 | [
"MIT"
] | null | null | null | backend/base/serializers.py | mazin123100/ecommerce-app | d93926316cc7ba7647d02e43d4dd065184394966 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from django.contrib.auth.models import User
from .models import Product
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = '__all__' | 25.888889 | 53 | 0.759657 | from rest_framework import serializers
from django.contrib.auth.models import User
from .models import Product
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = '__all__' | true | true |
f73afbb3bfe017359420464452f7afee9f7f6cc0 | 2,974 | py | Python | xml_info.py | BenHall-7/MSClang | f85129f5642dc42b1ad3cd7603a0dbc63062ae05 | [
"MIT"
] | null | null | null | xml_info.py | BenHall-7/MSClang | f85129f5642dc42b1ad3cd7603a0dbc63062ae05 | [
"MIT"
] | null | null | null | xml_info.py | BenHall-7/MSClang | f85129f5642dc42b1ad3cd7603a0dbc63062ae05 | [
"MIT"
] | null | null | null | from xml.etree import ElementTree as ET
from enum import Enum
class VariableLabel:
    """A named, numbered label; may carry a list of method sub-labels."""

    def __init__(self, id=None, name=None):
        """Store the numeric id and the symbolic name of the label."""
        self.id = id
        self.name = name
        # Sub-labels (used for syscall methods); empty for plain labels.
        self.methods = []

    def getMethod(self, searchFor):
        """Return the method label matching the given name (str) or id (int).

        Returns None when nothing matches or searchFor has any other type.
        """
        # type() equality (not isinstance) is kept deliberately so e.g. bool
        # values do not match the int branch, as in the original dispatch.
        if type(searchFor) == str:
            candidates = (m for m in self.methods if m.name == searchFor)
        elif type(searchFor) == int:
            candidates = (m for m in self.methods if m.id == searchFor)
        else:
            return None
        return next(candidates, None)
class MscXmlInfo:
    """Function/global/syscall labels parsed from an MSC label XML file."""

    def __init__(self, filename=None):
        """Create an (optionally pre-populated) label store.

        Args:
            filename: path of a label XML file to parse immediately, if given.
        """
        self.globals = []
        self.functions = []
        self.syscalls = []
        if filename is not None:
            self.read(filename)

    def read(self, filename):
        """Parse the label XML file and append its entries to this object."""
        root = ET.parse(filename).getroot()
        for node in root.find("functions").findall("function"):
            self.functions.append(VariableLabel(
                int(node.find("id").text, 0),
                node.find("name").text
            ))
        for node in root.find("globals").findall("global"):
            self.globals.append(VariableLabel(
                int(node.find("id").text, 0),
                node.find("name").text
            ))
        for node in root.find("syscalls").findall("syscall"):
            label = VariableLabel(
                int(node.find("id").text, 0),
                node.find("name").text
            )
            # Syscalls may optionally carry a <methods> element.
            methods_node = node.find("methods")
            if methods_node is not None:
                for method_node in methods_node.findall("method"):
                    label.methods.append(VariableLabel(
                        int(method_node.find("id").text, 0),
                        method_node.find("name").text
                    ))
            self.syscalls.append(label)

    def getFunc(self, searchFor):
        """Return the function label matching the given name (str) or id (int)."""
        return self._lookup(self.functions, searchFor)

    def getSyscall(self, searchFor):
        """Return the syscall label matching the given name (str) or id (int)."""
        return self._lookup(self.syscalls, searchFor)

    def getGlobal(self, searchFor):
        """Return the global label matching the given name (str) or id (int)."""
        return self._lookup(self.globals, searchFor)

    @staticmethod
    def _lookup(labels, searchFor):
        # Strings match on .name, ints on .id; anything else yields None.
        # type() equality (not isinstance) mirrors the original dispatch, so
        # e.g. bool arguments never match the int branch.
        if type(searchFor) == str:
            return next((label for label in labels if label.name == searchFor), None)
        if type(searchFor) == int:
            return next((label for label in labels if label.id == searchFor), None)
        return None
| 35.831325 | 72 | 0.515131 | from xml.etree import ElementTree as ET
from enum import Enum
class VariableLabel:
    """A named, numbered label; may carry a list of method sub-labels."""
    def __init__(self, id=None, name=None):
        """Store the numeric id and symbolic name of the label."""
        self.id = id
        self.name = name
        # Sub-labels (used for syscall methods); empty for plain labels.
        self.methods = []
    def getMethod(self, searchFor):
        """Return the method label matching the given name (str) or id (int).

        Falls through to None for no match or any other argument type.
        """
        if type(searchFor) == str:
            for method in self.methods:
                if method.name == searchFor:
                    return method
        elif type(searchFor) == int:
            for method in self.methods:
                if method.id == searchFor:
                    return method
class MscXmlInfo:
    """Function/global/syscall labels parsed from an MSC label XML file."""
    def __init__(self, filename=None):
        """Create a label store; parses `filename` immediately when given."""
        self.globals = []
        self.functions = []
        self.syscalls = []
        if filename != None:
            self.read(filename)
    def read(self, filename):
        """Parse the label XML file and append its entries to this object."""
        labels = ET.parse(filename).getroot()
        # Function labels: <functions>/<function> with <id> and <name>.
        for function in labels.find("functions").findall("function"):
            self.functions.append(VariableLabel(
                int(function.find("id").text, 0),
                function.find("name").text
            ))
        # Global labels: <globals>/<global>.
        for globalNode in labels.find("globals").findall("global"):
            self.globals.append(VariableLabel(
                int(globalNode.find("id").text, 0),
                globalNode.find("name").text
            ))
        # Syscall labels, each optionally carrying a <methods> element.
        for syscall in labels.find("syscalls").findall("syscall"):
            syscallLabel = VariableLabel(
                int(syscall.find("id").text, 0),
                syscall.find("name").text
            )
            if syscall.find("methods") != None:
                for method in syscall.find("methods").findall("method"):
                    syscallLabel.methods.append(VariableLabel(
                        int(method.find("id").text, 0),
                        method.find("name").text
                    ))
            self.syscalls.append(syscallLabel)
    def getFunc(self, searchFor):
        """Return the function label matching the given name (str) or id (int)."""
        if type(searchFor) == str:
            for function in self.functions:
                if function.name == searchFor:
                    return function
        elif type(searchFor) == int:
            for function in self.functions:
                if function.id == searchFor:
                    return function
    def getSyscall(self, searchFor):
        """Return the syscall label matching the given name (str) or id (int)."""
        if type(searchFor) == str:
            for syscall in self.syscalls:
                if syscall.name == searchFor:
                    return syscall
        elif type(searchFor) == int:
            for syscall in self.syscalls:
                if syscall.id == searchFor:
                    return syscall
    def getGlobal(self, searchFor):
        """Return the global label matching the given name (str) or id (int)."""
        if type(searchFor) == str:
            for globalVar in self.globals:
                if globalVar.name == searchFor:
                    return globalVar
        elif type(searchFor) == int:
            for globalVar in self.globals:
                if globalVar.id == searchFor:
                    return globalVar
| true | true |
f73afc2f3d5ae8fafc1cd7d0a54d0c6049227974 | 4,634 | py | Python | dimod/converters.py | randomir/dimod | 8f636168a620e0dc9d969fb0a813f6b4ba5a755c | [
"Apache-2.0"
] | null | null | null | dimod/converters.py | randomir/dimod | 8f636168a620e0dc9d969fb0a813f6b4ba5a755c | [
"Apache-2.0"
] | null | null | null | dimod/converters.py | randomir/dimod | 8f636168a620e0dc9d969fb0a813f6b4ba5a755c | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
"""
Functions that convert binary quadratic models to and from other formats.
"""
from dimod.bqm.adjdictbqm import AdjDictBQM
__all__ = ['to_networkx_graph', 'from_networkx_graph']
def to_networkx_graph(bqm,
                      node_attribute_name='bias',
                      edge_attribute_name='bias'):
    """Convert a binary quadratic model to NetworkX graph format.

    Note that NetworkX must be installed for this method to work.

    Args:
        bqm:
            The binary quadratic model to convert; its linear biases become
            node attributes and its quadratic biases become edge attributes.

        node_attribute_name (hashable, optional, default='bias'):
            Attribute name for linear biases.

        edge_attribute_name (hashable, optional, default='bias'):
            Attribute name for quadratic biases.

    Returns:
        :class:`networkx.Graph`: A NetworkX graph with biases stored as
        node/edge attributes.
    """
    # Imported lazily so dimod does not hard-depend on networkx.
    import networkx as nx
    BQM = nx.Graph()
    # add the linear biases (vartype is duplicated onto every node)
    BQM.add_nodes_from(((v, {node_attribute_name: bias, 'vartype': bqm.vartype})
                        for v, bias in bqm.linear.items()))
    # add the quadratic biases
    BQM.add_edges_from(((u, v, {edge_attribute_name: bias})
                        for (u, v), bias in bqm.quadratic.items()))
    # set the offset and vartype properties for the graph
    BQM.offset = bqm.offset
    BQM.vartype = bqm.vartype
    return BQM
def from_networkx_graph(G, vartype=None,
                        node_attribute_name='bias', edge_attribute_name='bias',
                        cls=AdjDictBQM):
    """Create a binary quadratic model from a NetworkX graph.

    Args:
        G (:obj:`networkx.Graph`):
            A NetworkX graph whose node/edge attributes hold the biases.
        vartype (:class:`.Vartype`/str/set, optional):
            Variable type for the binary quadratic model
            (``'SPIN'``/``{-1, 1}`` or ``'BINARY'``/``{0, 1}``). When
            omitted, ``G.vartype`` is used; when given, it overrides any
            ``G.vartype`` attribute.
        node_attribute_name (hashable, optional, default='bias'):
            Node attribute holding the linear bias. Nodes without a matching
            attribute default to a bias of 0.
        edge_attribute_name (hashable, optional, default='bias'):
            Edge attribute holding the quadratic bias. Edges without a
            matching attribute default to a bias of 0.
        cls (type, optional):
            The type of binary quadratic model to construct. Default is
            :class:`.AdjDictBQM`.

    Returns:
        A binary quadratic model of type `cls`.

    Raises:
        ValueError: If neither the ``vartype`` argument nor a ``G.vartype``
            attribute is available.
    """
    # Resolve the vartype: an explicit argument wins; otherwise fall back
    # to the graph-level attribute.
    if vartype is None:
        if not hasattr(G, 'vartype'):
            msg = ("either 'vartype' argument must be provided or "
                   "the given graph should have a vartype attribute.")
            raise ValueError(msg)
        vartype = G.vartype

    # The node data view yields (node, bias) pairs, with missing attributes
    # replaced by the default of 0.
    linear = dict(G.nodes(data=node_attribute_name, default=0))

    quadratic = {}
    for u, v, bias in G.edges(data=edge_attribute_name, default=0):
        quadratic[(u, v)] = bias

    # The offset is optional graph-level metadata.
    offset = G.offset if hasattr(G, 'offset') else 0

    return cls(linear, quadratic, offset, vartype)
| 35.922481 | 80 | 0.594303 |
from dimod.bqm.adjdictbqm import AdjDictBQM
__all__ = ['to_networkx_graph', 'from_networkx_graph']
def to_networkx_graph(bqm,
node_attribute_name='bias',
edge_attribute_name='bias'):
import networkx as nx
BQM = nx.Graph()
BQM.add_nodes_from(((v, {node_attribute_name: bias, 'vartype': bqm.vartype})
for v, bias in bqm.linear.items()))
BQM.add_edges_from(((u, v, {edge_attribute_name: bias})
for (u, v), bias in bqm.quadratic.items()))
BQM.offset = bqm.offset
BQM.vartype = bqm.vartype
return BQM
def from_networkx_graph(G, vartype=None,
node_attribute_name='bias', edge_attribute_name='bias',
cls=AdjDictBQM):
if vartype is None:
if not hasattr(G, 'vartype'):
msg = ("either 'vartype' argument must be provided or "
"the given graph should have a vartype attribute.")
raise ValueError(msg)
vartype = G.vartype
linear = {v: b for v, b in G.nodes(data=node_attribute_name, default=0)}
quadratic = {(u, v): b
for u, v, b in G.edges(data=edge_attribute_name, default=0)}
offset = getattr(G, 'offset', 0)
return cls(linear, quadratic, offset, vartype)
| true | true |
f73afcd0822e7126176eee07d8981a396d91fd93 | 1,693 | py | Python | basic/basic_example1.py | mingewang/pytorch_deep_learning_by_example | 83c9e12364a359b9ef77f0645ca7815e9e817f58 | [
"MIT"
] | 7 | 2019-05-28T11:26:02.000Z | 2020-02-13T18:24:18.000Z | basic/basic_example1.py | mingewang/pytorch_deep_learning_by_example | 83c9e12364a359b9ef77f0645ca7815e9e817f58 | [
"MIT"
] | null | null | null | basic/basic_example1.py | mingewang/pytorch_deep_learning_by_example | 83c9e12364a359b9ef77f0645ca7815e9e817f58 | [
"MIT"
] | 5 | 2019-07-24T18:08:35.000Z | 2021-08-09T01:07:54.000Z | # -*- coding: utf-8 -*-
import torch
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# Use the nn package to define our model and loss function.
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
loss_fn = torch.nn.MSELoss(reduction='sum')
# Use the optim package to define an Optimizer that will update the weights of
# the model for us. Here we will use Adam; the optim package contains many other
# optimization algoriths. The first argument to the Adam constructor tells the
# optimizer which Tensors it should update.
learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
epochs = 5
for t in range(500):
# Forward pass: compute predicted y by passing x to the model.
y_pred = model(x)
# Compute and print loss.
loss = loss_fn(y_pred, y)
print(t, loss.item())
# Before the backward pass, use the optimizer object to zero all of the
# gradients for the variables it will update (which are the learnable
# weights of the model). This is because by default, gradients are
# accumulated in buffers( i.e, not overwritten) whenever .backward()
# is called. Checkout docs of torch.autograd.backward for more details.
optimizer.zero_grad()
# Backward pass: compute gradient of the loss with respect to model
# parameters
loss.backward()
# Calling the step function on an Optimizer makes an update to its
# parameters
optimizer.step()
| 33.196078 | 80 | 0.712345 |
import torch
N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
loss_fn = torch.nn.MSELoss(reduction='sum')
learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
epochs = 5
for t in range(500):
y_pred = model(x)
loss = loss_fn(y_pred, y)
print(t, loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
| true | true |
f73afd1e493ad48a2dbdfb848634a13d8e2b0146 | 2,219 | py | Python | build/repo/boost/boost-b2-msvc-invoke.py | fifoforlifo/nstd | 68f5b370e10b2d1e078027ecbc040b7eaa9e08aa | [
"BSL-1.0"
] | null | null | null | build/repo/boost/boost-b2-msvc-invoke.py | fifoforlifo/nstd | 68f5b370e10b2d1e078027ecbc040b7eaa9e08aa | [
"BSL-1.0"
] | null | null | null | build/repo/boost/boost-b2-msvc-invoke.py | fifoforlifo/nstd | 68f5b370e10b2d1e078027ecbc040b7eaa9e08aa | [
"BSL-1.0"
] | null | null | null | import os
import sys
import glob
script, boostDir, vcVer, vcDir, arch, runtimeLink, config, stageDir, logFilePath, guardFileBase, guardFilePath = sys.argv
# For this to work, modify tools\build\v2\tools\msvc.jam with this:
#
# local rule auto-detect-toolset-versions ( )
# {
# # pynja: add explicit control of VC directory via environment variable PYNJA_VC_DIR
# local pynja-vc-dir = os.environ PYNJA_VC_DIR ;
# if $(version) && $(pynja-vc-dir)
# {
# register-configuration $(version) : [ $(pynja-vc-dir) ] ;
# return $(version) ;
# }
#
### Original code ...
# if [ os.name ] in NT CYGWIN
if arch == "x86":
addrModel = '32'
else:
addrModel = '64'
os.chdir(boostDir)
os.environ["PYNJA_VC_DIR"] = vcDir
# extend this list with additional libs you wish to build
libnames = [
"chrono",
"date_time",
#"exception",
"filesystem",
#"graph",
#"graph_parallel",
#"iostreams",
"locale",
"math",
#"mpi",
"program_options",
#"python",
"regex",
#"serialization",
#"signals",
"system",
#"test",
"thread",
"timer",
#"wave",
]
withLibs = " ".join([("--with-" + libname) for libname in libnames])
cmdStatic = r'b2 --build-dir=built toolset=msvc-%(vcVer)s.0 link=static runtime-link=%(runtimeLink)s %(withLibs)s stage --stagedir="%(stageDir)s" -j 9 address-model=%(addrModel)s %(config)s 2>&1 >"%(logFilePath)s"' % locals()
cmdShared = r'b2 --build-dir=built toolset=msvc-%(vcVer)s.0 link=shared runtime-link=%(runtimeLink)s %(withLibs)s stage --stagedir="%(stageDir)s" -j 9 address-model=%(addrModel)s %(config)s 2>&1 >"%(logFilePath)s"' % locals()
if not os.path.exists(stageDir):
os.makedirs(stageDir)
# erase old/stale guard files
for oldGuard in glob.glob(u'%(guardFileBase)s_*' % locals()):
os.unlink(oldGuard)
# invoke boost build
exitcodeStatic = os.system(cmdStatic)
exitcodeShared = os.system(cmdShared)
if exitcodeStatic or exitcodeShared:
with open(logFilePath, 'r') as logFile:
logContents = logFile.read()
print('%s' % logContents)
sys.exit(1)
# success! create guard file
with open(guardFilePath, 'w+') as f:
pass
sys.exit(0)
| 28.818182 | 227 | 0.639928 | import os
import sys
import glob
script, boostDir, vcVer, vcDir, arch, runtimeLink, config, stageDir, logFilePath, guardFileBase, guardFilePath = sys.argv
] = vcDir
libnames = [
"chrono",
"date_time",
"filesystem",
"locale",
"math",
"program_options",
"regex",
"system",
"thread",
"timer",
]
withLibs = " ".join([("--with-" + libname) for libname in libnames])
cmdStatic = r'b2 --build-dir=built toolset=msvc-%(vcVer)s.0 link=static runtime-link=%(runtimeLink)s %(withLibs)s stage --stagedir="%(stageDir)s" -j 9 address-model=%(addrModel)s %(config)s 2>&1 >"%(logFilePath)s"' % locals()
cmdShared = r'b2 --build-dir=built toolset=msvc-%(vcVer)s.0 link=shared runtime-link=%(runtimeLink)s %(withLibs)s stage --stagedir="%(stageDir)s" -j 9 address-model=%(addrModel)s %(config)s 2>&1 >"%(logFilePath)s"' % locals()
if not os.path.exists(stageDir):
os.makedirs(stageDir)
for oldGuard in glob.glob(u'%(guardFileBase)s_*' % locals()):
os.unlink(oldGuard)
exitcodeStatic = os.system(cmdStatic)
exitcodeShared = os.system(cmdShared)
if exitcodeStatic or exitcodeShared:
with open(logFilePath, 'r') as logFile:
logContents = logFile.read()
print('%s' % logContents)
sys.exit(1)
with open(guardFilePath, 'w+') as f:
pass
sys.exit(0)
| true | true |
f73afd385ba51cd16c1bc54bdba73f68de01b930 | 2,979 | py | Python | jesse/modes/import_candles_mode/drivers/binance_futures.py | quantfor/jesse | 0a451132aaaf848b2ed6bcd0efa90d73c04d538d | [
"MIT"
] | null | null | null | jesse/modes/import_candles_mode/drivers/binance_futures.py | quantfor/jesse | 0a451132aaaf848b2ed6bcd0efa90d73c04d538d | [
"MIT"
] | null | null | null | jesse/modes/import_candles_mode/drivers/binance_futures.py | quantfor/jesse | 0a451132aaaf848b2ed6bcd0efa90d73c04d538d | [
"MIT"
] | null | null | null | import requests
import jesse.helpers as jh
from jesse import exceptions
from jesse.modes.import_candles_mode.drivers.interface import CandleExchange
class BinanceFutures(CandleExchange):
    """Candle importer driver for the Binance USDT-margined futures exchange."""

    def __init__(self) -> None:
        # Imported lazily (inside __init__) to prevent a possible
        # circular-import problem at module load time.
        from jesse.modes.import_candles_mode.drivers.binance import Binance

        super().__init__(
            name='Binance Futures',
            count=1000,
            rate_limit_per_second=2,
            backup_exchange_class=Binance
        )

        self.endpoint = 'https://fapi.binance.com/fapi/v1/klines'

    def get_starting_time(self, symbol) -> int:
        """Return the earliest importable candle timestamp for ``symbol``."""
        params = {
            'interval': '1d',
            'symbol': jh.dashless_symbol(symbol),
            'limit': 1500,
        }

        response = requests.get(self.endpoint, params=params)

        # Exchange in maintenance
        if response.status_code == 502:
            raise exceptions.ExchangeInMaintenance('ERROR: 502 Bad Gateway. Please try again later')
        # Unsupported symbol
        if response.status_code == 400:
            raise ValueError(response.json()['msg'])
        if response.status_code != 200:
            raise Exception(response.content)

        klines = response.json()

        # The exchange's first daily candle does not cover a full day of
        # 1m candles, so start importing from the following day instead.
        first_timestamp = int(klines[0][0])
        return first_timestamp + 60_000 * 1440

    def fetch(self, symbol, start_timestamp):
        """Fetch up to ``self.count`` 1m candles beginning at ``start_timestamp``.

        note1: unlike Bitfinex, Binance does NOT skip candles with volume=0.
        note2: like Bitfinex, start_time includes the candle and so does the end_time.
        """
        params = {
            'interval': '1m',
            'symbol': jh.dashless_symbol(symbol),
            'startTime': start_timestamp,
            'endTime': start_timestamp + (self.count - 1) * 60000,
            'limit': self.count,
        }

        response = requests.get(self.endpoint, params=params)

        # Exchange in maintenance
        if response.status_code == 502:
            raise exceptions.ExchangeInMaintenance('ERROR: 502 Bad Gateway. Please try again later')
        # Unsupported symbol
        if response.status_code == 400:
            raise ValueError(response.json()['msg'])
        if response.status_code != 200:
            # Any other failure: signal "no data" to the caller.
            return

        klines = response.json()

        # Translate the exchange's positional kline arrays into jesse's
        # candle dictionaries.
        return [{
            'id': jh.generate_unique_id(),
            'symbol': symbol,
            'exchange': self.name,
            'timestamp': int(kline[0]),
            'open': float(kline[1]),
            'close': float(kline[4]),
            'high': float(kline[2]),
            'low': float(kline[3]),
            'volume': float(kline[5])
        } for kline in klines]
| 32.032258 | 100 | 0.58711 | import requests
import jesse.helpers as jh
from jesse import exceptions
from jesse.modes.import_candles_mode.drivers.interface import CandleExchange
class BinanceFutures(CandleExchange):
def __init__(self) -> None:
from jesse.modes.import_candles_mode.drivers.binance import Binance
super().__init__(
name='Binance Futures',
count=1000,
rate_limit_per_second=2,
backup_exchange_class=Binance
)
self.endpoint = 'https://fapi.binance.com/fapi/v1/klines'
def get_starting_time(self, symbol) -> int:
dashless_symbol = jh.dashless_symbol(symbol)
payload = {
'interval': '1d',
'symbol': dashless_symbol,
'limit': 1500,
}
response = requests.get(self.endpoint, params=payload)
if response.status_code == 502:
raise exceptions.ExchangeInMaintenance('ERROR: 502 Bad Gateway. Please try again later')
if response.status_code == 400:
raise ValueError(response.json()['msg'])
if response.status_code != 200:
raise Exception(response.content)
data = response.json()
# candles, let's start since the second day then
first_timestamp = int(data[0][0])
return first_timestamp + 60_000 * 1440
def fetch(self, symbol, start_timestamp):
end_timestamp = start_timestamp + (self.count - 1) * 60000
dashless_symbol = jh.dashless_symbol(symbol)
payload = {
'interval': '1m',
'symbol': dashless_symbol,
'startTime': start_timestamp,
'endTime': end_timestamp,
'limit': self.count,
}
response = requests.get(self.endpoint, params=payload)
if response.status_code == 502:
raise exceptions.ExchangeInMaintenance('ERROR: 502 Bad Gateway. Please try again later')
if response.status_code == 400:
raise ValueError(response.json()['msg'])
if response.status_code != 200:
return
data = response.json()
return [{
'id': jh.generate_unique_id(),
'symbol': symbol,
'exchange': self.name,
'timestamp': int(d[0]),
'open': float(d[1]),
'close': float(d[4]),
'high': float(d[2]),
'low': float(d[3]),
'volume': float(d[5])
} for d in data]
| true | true |
f73afd95f26f890041a6a3dff2db4d6fe0c59391 | 16,706 | py | Python | src/skmultiflow/trees/stacked_single_target_hoeffding_tree_regressor.py | lambertsbennett/scikit-multiflow | bc714fd5ee4f0a486adc00ec6ae39eafa64f81cc | [
"BSD-3-Clause"
] | 1 | 2021-06-17T18:46:42.000Z | 2021-06-17T18:46:42.000Z | src/skmultiflow/trees/stacked_single_target_hoeffding_tree_regressor.py | lambertsbennett/scikit-multiflow | bc714fd5ee4f0a486adc00ec6ae39eafa64f81cc | [
"BSD-3-Clause"
] | null | null | null | src/skmultiflow/trees/stacked_single_target_hoeffding_tree_regressor.py | lambertsbennett/scikit-multiflow | bc714fd5ee4f0a486adc00ec6ae39eafa64f81cc | [
"BSD-3-Clause"
] | null | null | null | from operator import attrgetter
import numpy as np
from skmultiflow.core import MultiOutputMixin
from skmultiflow.trees import iSOUPTreeRegressor
from skmultiflow.utils import get_dimensions
from skmultiflow.trees.split_criterion import IntraClusterVarianceReductionSplitCriterion
from skmultiflow.trees.nodes import LearningNode
from skmultiflow.trees.nodes import SSTActiveLearningNode
from skmultiflow.trees.nodes import SSTActiveLearningNodeAdaptive
from skmultiflow.trees.nodes import SSTInactiveLearningNode
from skmultiflow.trees.nodes import SSTInactiveLearningNodeAdaptive
class StackedSingleTargetHoeffdingTreeRegressor(iSOUPTreeRegressor, MultiOutputMixin):
    """Stacked Single-target Hoeffding Tree regressor.
    Implementation of the Stacked Single-target Hoeffding Tree (SST-HT) method
    for multi-target regression as proposed by S. M. Mastelini, S. Barbon Jr.,
    and A. C. P. L. F. de Carvalho [1]_.
    Parameters
    ----------
    max_byte_size: int (default=33554432)
        Maximum memory consumed by the tree.
    memory_estimate_period: int (default=1000000)
        Number of instances between memory consumption checks.
    grace_period: int (default=200)
        Number of instances a leaf should observe between split attempts.
    split_confidence: float (default=0.0000001)
        Allowed error in split decision, a value closer to 0 takes longer to
        decide.
    tie_threshold: float (default=0.05)
        Threshold below which a split will be forced to break ties.
    binary_split: boolean (default=False)
        If True, only allow binary splits.
    stop_mem_management: boolean (default=False)
        If True, stop growing as soon as memory limit is hit.
    remove_poor_atts: boolean (default=False)
        If True, disable poor attributes.
    no_preprune: boolean (default=False)
        If True, disable pre-pruning.
    leaf_prediction: string (default='perceptron')
        | Prediction mechanism used at leafs.
        | 'perceptron' - Stacked perceptron
        | 'adaptive' - Adaptively chooses between the best predictor (mean,
        perceptron or stacked perceptron)
    nb_threshold: int (default=0)
        Number of instances a leaf should observe before allowing Naive Bayes.
    nominal_attributes: list, optional
        List of Nominal attributes. If emtpy, then assume that all attributes
        are numerical.
    learning_ratio_perceptron: float
        The learning rate of the perceptron.
    learning_ratio_decay: float
        Decay multiplier for the learning rate of the perceptron
    learning_ratio_const: Bool
        If False the learning ratio will decay with the number of examples seen
    random_state: int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when leaf_prediction is 'perceptron'.
    References
    ----------
    .. [1] Mastelini, S. M., Barbon Jr, S., de Carvalho, A. C. P. L. F. (2019).
        "Online Multi-target regression trees with stacked leaf models". arXiv
        preprint arXiv:1903.12483.
    Examples
    --------
    >>> # Imports
    >>> from skmultiflow.data import RegressionGenerator
    >>> from skmultiflow.trees import StackedSingleTargetHoeffdingTreeRegressor
    >>> import numpy as np
    >>>
    >>> # Setup a data stream
    >>> n_targets = 3
    >>> stream = RegressionGenerator(n_targets=n_targets, random_state=1, n_samples=200)
    >>>
    >>> # Setup the Stacked Single-target Hoeffding Tree Regressor
    >>> sst_ht = StackedSingleTargetHoeffdingTreeRegressor()
    >>>
    >>> # Auxiliary variables to control loop and track performance
    >>> n_samples = 0
    >>> max_samples = 200
    >>> y_pred = np.zeros((max_samples, n_targets))
    >>> y_true = np.zeros((max_samples, n_targets))
    >>>
    >>> # Run test-then-train loop for max_samples and while there is data
    >>> while n_samples < max_samples and stream.has_more_samples():
    >>>     X, y = stream.next_sample()
    >>>     y_true[n_samples] = y[0]
    >>>     y_pred[n_samples] = sst_ht.predict(X)[0]
    >>>     sst_ht.partial_fit(X, y)
    >>>     n_samples += 1
    >>>
    >>> # Display results
    >>> print('Stacked Single-target Hoeffding Tree regressor example')
    >>> print('{} samples analyzed.'.format(n_samples))
    >>> print('Mean absolute error: {}'.format(np.mean(np.abs(y_true - y_pred))))
    """

    # =====================================================================
    # == Stacked Single-target Hoeffding Regression Tree implementation ===
    # =====================================================================

    def __init__(self,
                 max_byte_size=33554432,
                 memory_estimate_period=1000000,
                 grace_period=200,
                 split_confidence=0.0000001,
                 tie_threshold=0.05,
                 binary_split=False,
                 stop_mem_management=False,
                 remove_poor_atts=False,
                 leaf_prediction='perceptron',
                 no_preprune=False,
                 nb_threshold=0,
                 nominal_attributes=None,
                 learning_ratio_perceptron=0.02,
                 learning_ratio_decay=0.001,
                 learning_ratio_const=True,
                 random_state=None):
        super().__init__(max_byte_size=max_byte_size,
                         memory_estimate_period=memory_estimate_period,
                         grace_period=grace_period,
                         split_confidence=split_confidence,
                         tie_threshold=tie_threshold,
                         binary_split=binary_split,
                         stop_mem_management=stop_mem_management,
                         remove_poor_atts=remove_poor_atts,
                         no_preprune=no_preprune,
                         leaf_prediction=leaf_prediction,
                         nb_threshold=nb_threshold,
                         nominal_attributes=nominal_attributes)
        self.split_criterion = 'icvr'  # intra cluster variance reduction
        self.learning_ratio_perceptron = learning_ratio_perceptron
        self.learning_ratio_decay = learning_ratio_decay
        self.learning_ratio_const = learning_ratio_const
        self.random_state = random_state

        # Tree-structure bookkeeping (managed by the inherited grow/split
        # machinery of the Hoeffding-tree base classes).
        self._tree_root = None
        self._decision_node_cnt = 0
        self._active_leaf_node_cnt = 0
        self._inactive_leaf_node_cnt = 0
        self._inactive_leaf_byte_size_estimate = 0.0
        self._active_leaf_byte_size_estimate = 0.0
        self._byte_size_estimate_overhead_fraction = 1.0
        self._growth_allowed = True
        self._train_weight_seen_by_model = 0.0

        # Running statistics over the stream, used to (de)normalize samples
        # and predictions in predict().
        self.examples_seen = 0
        self.sum_of_values = 0.0
        self.sum_of_squares = 0.0
        self.sum_of_attribute_values = 0.0
        self.sum_of_attribute_squares = 0.0

        # To add the n_targets property once
        self._n_targets_set = False

    @property
    def leaf_prediction(self):
        # Leaf prediction strategy; restricted to the stacked-perceptron
        # variants by the setter below.
        return self._leaf_prediction

    @leaf_prediction.setter
    def leaf_prediction(self, leaf_prediction):
        # Invalid options silently fall back to the 'perceptron' default
        # (with a console warning) rather than raising.
        if leaf_prediction not in {self._PERCEPTRON, self._ADAPTIVE}:
            print("Invalid leaf_prediction option {}', will use default '{}'".
                  format(leaf_prediction, self._PERCEPTRON))
            self._leaf_prediction = self._PERCEPTRON
        else:
            self._leaf_prediction = leaf_prediction

    def _get_predictors_faded_error(self, X):
        """Get the faded error of the leaf corresponding to the pased instance.
        Parameters
        ----------
        X: numpy.ndarray of length equal to the number of features.
            Instance attributes.
        Returns
        -------
        dict (predictor, fmae)
            Faded mean absolute error per predictor ('mean', 'perceptron',
            'stacked_perceptron'), each an array with one entry per target.
        """
        fmaes = {}
        if self._tree_root is not None:
            # Route the instance down the tree to the leaf that would
            # predict for it.
            found_node = self._tree_root.filter_instance_to_leaf(X, None, -1)
            leaf_node = found_node.node
            if leaf_node is None:
                leaf_node = found_node.parent
            if isinstance(leaf_node, LearningNode):
                fmaes['mean'] = leaf_node.fMAE_M
                fmaes['perceptron'] = leaf_node.fMAE_P
                fmaes['stacked_perceptron'] = leaf_node.fMAE_SP
            else:
                # If the found node is not a learning node, give preference to
                # the mean predictor (zero error for mean, infinite error for
                # the perceptron variants).
                fmaes['mean'] = np.zeros(self._n_targets)
                fmaes['perceptron'] = np.full(self._n_targets, np.Inf)
                fmaes['stacked_perceptron'] = np.full(self._n_targets, np.Inf)

        return fmaes

    def _new_learning_node(self, initial_class_observations=None, parent_node=None,
                           is_active_node=True):
        """Create a new learning node. The type of learning node depends on
        the tree configuration (leaf_prediction) and on whether the node is
        active (still learning) or inactive (frozen).
        """
        if initial_class_observations is None:
            initial_class_observations = {}

        if is_active_node:
            if self.leaf_prediction == self._PERCEPTRON:
                return SSTActiveLearningNode(
                    initial_class_observations,
                    parent_node,
                    random_state=self.random_state
                )
            elif self.leaf_prediction == self._ADAPTIVE:
                new_node = SSTActiveLearningNodeAdaptive(
                    initial_class_observations,
                    parent_node,
                    random_state=self.random_state
                )
                # Resets faded errors
                new_node.fMAE_M = np.zeros(self._n_targets, dtype=np.float64)
                new_node.fMAE_P = np.zeros(self._n_targets, dtype=np.float64)
                new_node.fMAE_SP = np.zeros(self._n_targets, dtype=np.float64)
                return new_node
        else:
            # Inactive nodes are created from an existing (parent) node and
            # inherit its random state and, in the adaptive case, its
            # accumulated faded errors.
            if self.leaf_prediction == self._PERCEPTRON:
                return SSTInactiveLearningNode(
                    initial_class_observations,
                    parent_node,
                    random_state=parent_node.random_state
                )
            elif self.leaf_prediction == self._ADAPTIVE:
                new_node = SSTInactiveLearningNodeAdaptive(
                    initial_class_observations,
                    parent_node,
                    random_state=parent_node.random_state
                )
                new_node.fMAE_M = parent_node.fMAE_M
                new_node.fMAE_P = parent_node.fMAE_P
                new_node.fMAE_SP = parent_node.fMAE_SP
                return new_node

    def predict(self, X):
        """Predicts the target value using mean class or the perceptron.
        Parameters
        ----------
        X: numpy.ndarray of shape (n_samples, n_features)
            Samples for which we want to predict the labels.
        Returns
        -------
        list
            Predicted target values (shape (n_samples, n_targets)), or
            ``[0.0]`` when the model has not seen any sample yet.
        """
        r, _ = get_dimensions(X)

        try:
            predictions = np.zeros((r, self._n_targets), dtype=np.float64)
        except AttributeError:
            # _n_targets is only set after the first partial_fit; before
            # that, fall back to a constant prediction.
            return [0.0]
        for i in range(r):
            if self.leaf_prediction == self._PERCEPTRON:
                if self.examples_seen > 1:
                    perceptron_weights = self.get_weights_for_instance(X[i])
                    if perceptron_weights is None:
                        # Instance was sorted to a non-learning node: use
                        # mean prediction
                        votes = self.get_votes_for_instance(X[i]).copy()
                        number_of_examples_seen = votes[0]
                        sum_of_values = votes[1]
                        predictions[i] = sum_of_values / number_of_examples_seen
                        continue

                    normalized_sample = self.normalize_sample(X[i])
                    # Base (single-target) perceptron outputs, then the
                    # stacked meta-level combination (with a bias term).
                    normalized_base_prediction = np.matmul(
                        perceptron_weights[0], normalized_sample
                    )
                    normalized_meta_prediction = np.matmul(
                        perceptron_weights[1],
                        np.append(normalized_base_prediction, 1.0)
                    )

                    mean = self.sum_of_values / self.examples_seen
                    variance = (self.sum_of_squares -
                                (self.sum_of_values *
                                 self.sum_of_values) /
                                self.examples_seen) / (self.examples_seen - 1)
                    # Guard against tiny negative variances from floating
                    # point cancellation: sd is 0 where variance < 0.
                    sd = np.sqrt(variance, out=np.zeros_like(variance),
                                 where=variance >= 0.0)

                    # Samples are normalized using just one sd, as proposed in
                    # the iSoup-Tree method
                    predictions[i] = normalized_meta_prediction * sd + mean
            elif self.leaf_prediction == self._ADAPTIVE:
                if self.examples_seen > 1:
                    # Mean predictor
                    votes = self.get_votes_for_instance(X[i]).copy()
                    number_of_examples_seen = votes[0]
                    sum_of_values = votes[1]
                    pred_M = sum_of_values / number_of_examples_seen

                    # Perceptron variants
                    perceptron_weights = self.get_weights_for_instance(X[i])
                    if perceptron_weights is None:
                        # Instance was sorted to a non-learning node: use
                        # mean prediction
                        predictions[i] = pred_M
                        continue
                    else:
                        normalized_sample = self.normalize_sample(X[i])
                        # Standard perceptron
                        normalized_base_prediction = np.matmul(
                            perceptron_weights[0], normalized_sample
                        )
                        # Stacked perceptron
                        normalized_meta_prediction = np.matmul(
                            perceptron_weights[1],
                            np.append(normalized_base_prediction, 1.0)
                        )

                        mean = self.sum_of_values / self.examples_seen
                        variance = (self.sum_of_squares -
                                    (self.sum_of_values *
                                     self.sum_of_values) /
                                    self.examples_seen) / (self.examples_seen - 1)
                        sd = np.sqrt(variance, out=np.zeros_like(variance),
                                     where=variance >= 0.0)

                        # De-normalize both perceptron outputs back to the
                        # target scale.
                        pred_P = normalized_base_prediction * sd + mean
                        pred_SP = normalized_meta_prediction * sd + mean

                        # Gets faded errors for the related leaf predictors
                        fmae = self._get_predictors_faded_error(X[i])

                        # Selects, for each target, the best current performer
                        for j in range(self._n_targets):
                            b_pred = np.argmin([fmae['mean'][j],
                                                fmae['perceptron'][j],
                                                fmae['stacked_perceptron'][j]]
                                               )

                            if b_pred == 0:
                                # If all the expected errors are the same,
                                # use the standard perceptron
                                if fmae['mean'][j] == fmae['perceptron'][j] \
                                        == fmae['stacked_perceptron'][j]:
                                    predictions[i, j] = pred_P[j]
                                # Otherwise, use the simplest approach
                                else:
                                    predictions[i, j] = pred_M[j]
                            else:
                                if b_pred == 1:
                                    # Use the stacked perceptron if its expected
                                    # error is the same than the error for the
                                    # standard perceptron
                                    if fmae['perceptron'][j] == \
                                            fmae['stacked_perceptron'][j]:
                                        predictions[i, j] = pred_SP[j]
                                    else:
                                        predictions[i, j] = pred_P[j]
                                else:
                                    predictions[i, j] = pred_SP[j]

        return predictions
| 44.549333 | 89 | 0.5592 | from operator import attrgetter
import numpy as np
from skmultiflow.core import MultiOutputMixin
from skmultiflow.trees import iSOUPTreeRegressor
from skmultiflow.utils import get_dimensions
from skmultiflow.trees.split_criterion import IntraClusterVarianceReductionSplitCriterion
from skmultiflow.trees.nodes import LearningNode
from skmultiflow.trees.nodes import SSTActiveLearningNode
from skmultiflow.trees.nodes import SSTActiveLearningNodeAdaptive
from skmultiflow.trees.nodes import SSTInactiveLearningNode
from skmultiflow.trees.nodes import SSTInactiveLearningNodeAdaptive
class StackedSingleTargetHoeffdingTreeRegressor(iSOUPTreeRegressor, MultiOutputMixin):
def __init__(self,
max_byte_size=33554432,
memory_estimate_period=1000000,
grace_period=200,
split_confidence=0.0000001,
tie_threshold=0.05,
binary_split=False,
stop_mem_management=False,
remove_poor_atts=False,
leaf_prediction='perceptron',
no_preprune=False,
nb_threshold=0,
nominal_attributes=None,
learning_ratio_perceptron=0.02,
learning_ratio_decay=0.001,
learning_ratio_const=True,
random_state=None):
super().__init__(max_byte_size=max_byte_size,
memory_estimate_period=memory_estimate_period,
grace_period=grace_period,
split_confidence=split_confidence,
tie_threshold=tie_threshold,
binary_split=binary_split,
stop_mem_management=stop_mem_management,
remove_poor_atts=remove_poor_atts,
no_preprune=no_preprune,
leaf_prediction=leaf_prediction,
nb_threshold=nb_threshold,
nominal_attributes=nominal_attributes)
self.split_criterion = 'icvr'
self.learning_ratio_perceptron = learning_ratio_perceptron
self.learning_ratio_decay = learning_ratio_decay
self.learning_ratio_const = learning_ratio_const
self.random_state = random_state
self._tree_root = None
self._decision_node_cnt = 0
self._active_leaf_node_cnt = 0
self._inactive_leaf_node_cnt = 0
self._inactive_leaf_byte_size_estimate = 0.0
self._active_leaf_byte_size_estimate = 0.0
self._byte_size_estimate_overhead_fraction = 1.0
self._growth_allowed = True
self._train_weight_seen_by_model = 0.0
self.examples_seen = 0
self.sum_of_values = 0.0
self.sum_of_squares = 0.0
self.sum_of_attribute_values = 0.0
self.sum_of_attribute_squares = 0.0
self._n_targets_set = False
@property
def leaf_prediction(self):
return self._leaf_prediction
@leaf_prediction.setter
def leaf_prediction(self, leaf_prediction):
if leaf_prediction not in {self._PERCEPTRON, self._ADAPTIVE}:
print("Invalid leaf_prediction option {}', will use default '{}'".
format(leaf_prediction, self._PERCEPTRON))
self._leaf_prediction = self._PERCEPTRON
else:
self._leaf_prediction = leaf_prediction
def _get_predictors_faded_error(self, X):
fmaes = {}
if self._tree_root is not None:
found_node = self._tree_root.filter_instance_to_leaf(X, None, -1)
leaf_node = found_node.node
if leaf_node is None:
leaf_node = found_node.parent
if isinstance(leaf_node, LearningNode):
fmaes['mean'] = leaf_node.fMAE_M
fmaes['perceptron'] = leaf_node.fMAE_P
fmaes['stacked_perceptron'] = leaf_node.fMAE_SP
else:
# If the found node is not a learning node, give preference to
# the mean predictor
fmaes['mean'] = np.zeros(self._n_targets)
fmaes['perceptron'] = np.full(self._n_targets, np.Inf)
fmaes['stacked_perceptron'] = np.full(self._n_targets, np.Inf)
return fmaes
def _new_learning_node(self, initial_class_observations=None, parent_node=None,
is_active_node=True):
if initial_class_observations is None:
initial_class_observations = {}
if is_active_node:
if self.leaf_prediction == self._PERCEPTRON:
return SSTActiveLearningNode(
initial_class_observations,
parent_node,
random_state=self.random_state
)
elif self.leaf_prediction == self._ADAPTIVE:
new_node = SSTActiveLearningNodeAdaptive(
initial_class_observations,
parent_node,
random_state=self.random_state
)
# Resets faded errors
new_node.fMAE_M = np.zeros(self._n_targets, dtype=np.float64)
new_node.fMAE_P = np.zeros(self._n_targets, dtype=np.float64)
new_node.fMAE_SP = np.zeros(self._n_targets, dtype=np.float64)
return new_node
else:
if self.leaf_prediction == self._PERCEPTRON:
return SSTInactiveLearningNode(
initial_class_observations,
parent_node,
random_state=parent_node.random_state
)
elif self.leaf_prediction == self._ADAPTIVE:
new_node = SSTInactiveLearningNodeAdaptive(
initial_class_observations,
parent_node,
random_state=parent_node.random_state
)
new_node.fMAE_M = parent_node.fMAE_M
new_node.fMAE_P = parent_node.fMAE_P
new_node.fMAE_SP = parent_node.fMAE_SP
return new_node
    def predict(self, X):
        """Predict target values for the samples in ``X``.

        Each sample is routed to a leaf; depending on ``leaf_prediction``
        the output is the de-normalized perceptron prediction or, in
        adaptive mode, the per-target best of {mean, perceptron, stacked
        perceptron} as ranked by their faded MAEs.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples to predict.

        Returns
        -------
        numpy.ndarray of shape (n_samples, n_targets)
            Predictions; ``[0.0]`` if the model has not seen data yet.
        """
        r, _ = get_dimensions(X)
        try:
            predictions = np.zeros((r, self._n_targets), dtype=np.float64)
        except AttributeError:
            # _n_targets is undefined before the first training sample.
            return [0.0]
        for i in range(r):
            if self.leaf_prediction == self._PERCEPTRON:
                if self.examples_seen > 1:
                    perceptron_weights = self.get_weights_for_instance(X[i])
                    if perceptron_weights is None:
                        # Instance was sorted to a non-learning node: use
                        # mean prediction
                        votes = self.get_votes_for_instance(X[i]).copy()
                        number_of_examples_seen = votes[0]
                        sum_of_values = votes[1]
                        predictions[i] = sum_of_values / number_of_examples_seen
                        continue
                    normalized_sample = self.normalize_sample(X[i])
                    # Base perceptron output, then the stacked (meta) layer
                    # over it with a bias term appended.
                    normalized_base_prediction = np.matmul(
                        perceptron_weights[0], normalized_sample
                    )
                    normalized_meta_prediction = np.matmul(
                        perceptron_weights[1],
                        np.append(normalized_base_prediction, 1.0)
                    )
                    mean = self.sum_of_values / self.examples_seen
                    variance = (self.sum_of_squares -
                                (self.sum_of_values *
                                 self.sum_of_values) /
                                self.examples_seen) / (self.examples_seen - 1)
                    # Guard against tiny negative variances from rounding.
                    sd = np.sqrt(variance, out=np.zeros_like(variance),
                                 where=variance >= 0.0)
                    # Samples are normalized using just one sd, as proposed in
                    # the iSoup-Tree method
                    predictions[i] = normalized_meta_prediction * sd + mean
            elif self.leaf_prediction == self._ADAPTIVE:
                if self.examples_seen > 1:
                    # Mean predictor
                    votes = self.get_votes_for_instance(X[i]).copy()
                    number_of_examples_seen = votes[0]
                    sum_of_values = votes[1]
                    pred_M = sum_of_values / number_of_examples_seen
                    # Perceptron variants
                    perceptron_weights = self.get_weights_for_instance(X[i])
                    if perceptron_weights is None:
                        # Instance was sorted to a non-learning node: use
                        # mean prediction
                        predictions[i] = pred_M
                        continue
                    else:
                        normalized_sample = self.normalize_sample(X[i])
                        # Standard perceptron
                        normalized_base_prediction = np.matmul(
                            perceptron_weights[0], normalized_sample
                        )
                        # Stacked perceptron
                        normalized_meta_prediction = np.matmul(
                            perceptron_weights[1],
                            np.append(normalized_base_prediction, 1.0)
                        )
                        mean = self.sum_of_values / self.examples_seen
                        variance = (self.sum_of_squares -
                                    (self.sum_of_values *
                                     self.sum_of_values) /
                                    self.examples_seen) / (self.examples_seen - 1)
                        sd = np.sqrt(variance, out=np.zeros_like(variance),
                                     where=variance >= 0.0)
                        # De-normalize both perceptron outputs.
                        pred_P = normalized_base_prediction * sd + mean
                        pred_SP = normalized_meta_prediction * sd + mean
                    # Gets faded errors for the related leaf predictors
                    fmae = self._get_predictors_faded_error(X[i])
                    # Selects, for each target, the best current performer
                    for j in range(self._n_targets):
                        b_pred = np.argmin([fmae['mean'][j],
                                            fmae['perceptron'][j],
                                            fmae['stacked_perceptron'][j]]
                                           )
                        if b_pred == 0:
                            # If all the expected errors are the same,
                            # use the standard perceptron
                            if fmae['mean'][j] == fmae['perceptron'][j] \
                                    == fmae['stacked_perceptron'][j]:
                                predictions[i, j] = pred_P[j]
                            # Otherwise, use the simplest approach
                            else:
                                predictions[i, j] = pred_M[j]
                        else:
                            if b_pred == 1:
                                # Use the stacked perceptron if its expected
                                # error is the same than the error for the
                                # standard perceptron
                                if fmae['perceptron'][j] == \
                                        fmae['stacked_perceptron'][j]:
                                    predictions[i, j] = pred_SP[j]
                                else:
                                    predictions[i, j] = pred_P[j]
                            else:
                                predictions[i, j] = pred_SP[j]
        return predictions
| true | true |
f73afdcad8e818812662d41e891fdcf0eaf1cc95 | 7,622 | py | Python | src/api/bkuser_core/categories/plugins/ldap/adaptor.py | Canway-shiisa/bk-user | a049e80d12082960828015742cea4b041f4af796 | [
"MIT"
] | null | null | null | src/api/bkuser_core/categories/plugins/ldap/adaptor.py | Canway-shiisa/bk-user | a049e80d12082960828015742cea4b041f4af796 | [
"MIT"
] | null | null | null | src/api/bkuser_core/categories/plugins/ldap/adaptor.py | Canway-shiisa/bk-user | a049e80d12082960828015742cea4b041f4af796 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from dataclasses import dataclass, field
from typing import Any, Dict, List, NamedTuple, Optional
from django.utils.encoding import force_str
from ldap3.utils import dn as dn_utils
from bkuser_core.categories.plugins.constants import DYNAMIC_FIELDS_SETTING_KEY
from bkuser_core.categories.plugins.ldap.models import LdapDepartment, LdapUserProfile
from bkuser_core.user_settings.loader import ConfigProvider
logger = logging.getLogger(__name__)
@dataclass
class ProfileFieldMapper:
    """Extract user profile fields from the attributes of LDAP entries."""

    config_loader: ConfigProvider
    # Built-in profile fields; their LDAP attribute names are resolved from
    # the category settings via config_loader.
    embed_fields = [
        "username",
        "display_name",
        "email",
        "telephone",
    ]
    dynamic_fields: List = field(default_factory=list)

    def __post_init__(self):
        self.dynamic_fields_mapping = self.config_loader.get(DYNAMIC_FIELDS_SETTING_KEY)
        self.dynamic_fields = list(self.dynamic_fields_mapping.keys()) if self.dynamic_fields_mapping else []

    def get_value(
        self, field_name: str, user_meta: Dict[str, List[bytes]], remain_raw: bool = False, dynamic_field: bool = False
    ) -> Any:
        """Fetch the concrete value for ``field_name`` from raw LDAP data.

        Returns ``""`` when the field has no configured LDAP attribute or the
        attribute is absent/empty in ``user_meta``.
        """
        # Resolve the LDAP attribute name for a custom (dynamic) field.
        if dynamic_field:
            ldap_field_name = field_name
            if ldap_field_name not in self.dynamic_fields_mapping.values():
                logger.info("no config[%s] in configs of dynamic_fields_mapping", field_name)
                return ""
        else:
            # Resolve the attribute name from the category settings.
            ldap_field_name = self.config_loader.get(field_name)
            if not ldap_field_name:
                logger.info("no config[%s] in configs of category", field_name)
                return ""
        # 1. Look up the value by the resolved attribute name.
        if ldap_field_name not in user_meta or not user_meta[ldap_field_name]:
            logger.info("field[%s] is missing in raw attributes of user data from ldap", field_name)
            return ""
        # 2. For attributes such as memberOf, return the raw list unchanged.
        if remain_raw:
            return user_meta[ldap_field_name]

        return force_str(user_meta[ldap_field_name][0])

    def get_values(self, user_meta: Dict[str, List[bytes]]) -> Dict[str, Any]:
        """Fetch all built-in field values from LDAP data per the field mapping."""
        values = {}
        for field_name in self.embed_fields:
            values.update({field_name: self.get_value(field_name, user_meta)})
        return values

    def get_dynamic_values(self, user_meta: Dict[str, List[bytes]]) -> Dict[str, Any]:
        """Fetch the LDAP values corresponding to the custom (dynamic) fields."""
        values = {}
        if self.dynamic_fields:
            values.update(
                {
                    field_name: self.get_value(
                        field_name=self.dynamic_fields_mapping[field_name], user_meta=user_meta, dynamic_field=True
                    )
                    for field_name in self.dynamic_fields
                }
            )
        return values

    def get_user_attributes(self) -> list:
        """Return the list of remote LDAP attribute names to request."""
        user_attributes = [self.config_loader[x] for x in self.embed_fields if self.config_loader.get(x)]
        user_attributes.extend(
            [self.dynamic_fields_mapping[x] for x in self.dynamic_fields if self.dynamic_fields_mapping.get(x)]
        )
        return user_attributes
def user_adapter(
    code: str, user_meta: Dict[str, Any], field_mapper: ProfileFieldMapper, restrict_types: List[str]
) -> LdapUserProfile:
    """Convert a raw LDAP user entry into an LdapUserProfile.

    ``code`` is the stable identifier for the user; ``restrict_types`` limits
    which DN attribute types (e.g. ``OU``) count as department components.
    """
    groups = field_mapper.get_value("user_member_of", user_meta["raw_attributes"], True) or []
    return LdapUserProfile(
        **field_mapper.get_values(user_meta["raw_attributes"]),
        code=code,
        extras=field_mapper.get_dynamic_values(user_meta["raw_attributes"]),
        # TODO: finish the departments conversion logic
        departments=[
            # By convention, every DN component except the first is part of
            # the user's department path, hence the [1:] slice.
            list(reversed(parse_dn_value_list(user_meta["dn"], restrict_types)[1:])),
            # Membership relations between the user and its user groups.
            *[list(reversed(parse_dn_value_list(force_str(group), restrict_types))) for group in groups],
        ],
    )
def department_adapter(code: str, dept_meta: Dict, is_group: bool, restrict_types: List[str]) -> LdapDepartment:
    """Convert a raw LDAP department/group entry into an LdapDepartment chain.

    Walks the entry's DN from the root downwards, chaining each level to its
    parent; the returned node is the most specific department and carries
    ``code``. Raises AssertionError if the DN yields no department components.
    """
    dn = dept_meta["dn"]
    dn_values = parse_dn_value_list(dn, restrict_types=restrict_types)

    parent_dept: Optional[LdapDepartment] = None
    for dept_name in reversed(dn_values):
        parent_dept = LdapDepartment(
            name=dept_name,
            parent=parent_dept,
            is_group=is_group,
        )

    # (assert message: "no department info could be extracted from the dn")
    assert parent_dept is not None, "未从 dn 中提取到任何部门信息"
    parent_dept.code = code
    return parent_dept
class RDN(NamedTuple):
    """RelativeDistinguishedName: one ``type=value`` component of a DN,
    together with the separator that followed it (``''`` for the last one)."""

    type: str
    value: str
    separator: str
def parse_dn_tree(dn: str, restrict_types: List[str] = None) -> List[RDN]:
    """Split *dn* into its sequence of relative distinguished names (RDNs).

    A DN is a sequence of RDNs connected by commas:

    >>> parse_dn_tree("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM")
    [RDN(type='CN', value='Jeff Smith', separator=','),
    RDN(type='OU', value='Sales', separator=','),
    RDN(type='DC', value='Fabrikam', separator=','),
    RDN(type='DC', value='COM', separator='')]

    When *restrict_types* is given, RDNs whose attribute type is not listed
    are dropped; the comparison is case-insensitive, so ``["DC"]``, ``["dc"]``
    and ``["Dc"]`` behave identically:

    >>> parse_dn_tree("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM", restrict_types=["dc"])
    [RDN(type='DC', value='Fabrikam', separator=','), RDN(type='DC', value='COM', separator='')]

    See Also: https://docs.microsoft.com/en-us/previous-versions/windows/desktop/ldap/distinguished-names
    """
    allowed_types = {type_.upper() for type_ in (restrict_types or [])}
    rdns = (RDN(*item) for item in dn_utils.parse_dn(dn, escape=True))
    if allowed_types:
        return [rdn for rdn in rdns if rdn.type.upper() in allowed_types]
    return list(rdns)
def parse_dn_value_list(dn: str, restrict_types: List[str] = None) -> List[str]:
    """Like :func:`parse_dn_tree`, but return only the attribute values.

    >>> parse_dn_value_list("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM")
    ['Jeff Smith', 'Sales', 'Fabrikam', 'COM']

    With *restrict_types*, attributes of other types are ignored:

    >>> parse_dn_value_list("CN=Jeff Smith,OU=Sales,DC=Fabrikam,DC=COM", restrict_types=["DC"])
    ['Fabrikam', 'COM']
    """
    return [rdn.value for rdn in parse_dn_tree(dn, restrict_types)]
| 38.301508 | 119 | 0.66846 |
import logging
from dataclasses import dataclass, field
from typing import Any, Dict, List, NamedTuple, Optional
from django.utils.encoding import force_str
from ldap3.utils import dn as dn_utils
from bkuser_core.categories.plugins.constants import DYNAMIC_FIELDS_SETTING_KEY
from bkuser_core.categories.plugins.ldap.models import LdapDepartment, LdapUserProfile
from bkuser_core.user_settings.loader import ConfigProvider
logger = logging.getLogger(__name__)
@dataclass
class ProfileFieldMapper:
config_loader: ConfigProvider
embed_fields = [
"username",
"display_name",
"email",
"telephone",
]
dynamic_fields: List = field(default_factory=list)
def __post_init__(self):
self.dynamic_fields_mapping = self.config_loader.get(DYNAMIC_FIELDS_SETTING_KEY)
self.dynamic_fields = list(self.dynamic_fields_mapping.keys()) if self.dynamic_fields_mapping else []
def get_value(
self, field_name: str, user_meta: Dict[str, List[bytes]], remain_raw: bool = False, dynamic_field: bool = False
) -> Any:
if dynamic_field:
ldap_field_name = field_name
if ldap_field_name not in self.dynamic_fields_mapping.values():
logger.info("no config[%s] in configs of dynamic_fields_mapping", field_name)
return ""
else:
ldap_field_name = self.config_loader.get(field_name)
if not ldap_field_name:
logger.info("no config[%s] in configs of category", field_name)
return ""
if ldap_field_name not in user_meta or not user_meta[ldap_field_name]:
logger.info("field[%s] is missing in raw attributes of user data from ldap", field_name)
return ""
if remain_raw:
return user_meta[ldap_field_name]
return force_str(user_meta[ldap_field_name][0])
def get_values(self, user_meta: Dict[str, List[bytes]]) -> Dict[str, Any]:
values = {}
for field_name in self.embed_fields:
values.update({field_name: self.get_value(field_name, user_meta)})
return values
def get_dynamic_values(self, user_meta: Dict[str, List[bytes]]) -> Dict[str, Any]:
values = {}
if self.dynamic_fields:
values.update(
{
field_name: self.get_value(
field_name=self.dynamic_fields_mapping[field_name], user_meta=user_meta, dynamic_field=True
)
for field_name in self.dynamic_fields
}
)
return values
def get_user_attributes(self) -> list:
user_attributes = [self.config_loader[x] for x in self.embed_fields if self.config_loader.get(x)]
user_attributes.extend(
[self.dynamic_fields_mapping[x] for x in self.dynamic_fields if self.dynamic_fields_mapping.get(x)]
)
return user_attributes
def user_adapter(
code: str, user_meta: Dict[str, Any], field_mapper: ProfileFieldMapper, restrict_types: List[str]
) -> LdapUserProfile:
groups = field_mapper.get_value("user_member_of", user_meta["raw_attributes"], True) or []
return LdapUserProfile(
**field_mapper.get_values(user_meta["raw_attributes"]),
code=code,
extras=field_mapper.get_dynamic_values(user_meta["raw_attributes"]),
departments=[
list(reversed(parse_dn_value_list(user_meta["dn"], restrict_types)[1:])),
*[list(reversed(parse_dn_value_list(force_str(group), restrict_types))) for group in groups],
],
)
def department_adapter(code: str, dept_meta: Dict, is_group: bool, restrict_types: List[str]) -> LdapDepartment:
dn = dept_meta["dn"]
dn_values = parse_dn_value_list(dn, restrict_types=restrict_types)
parent_dept: Optional[LdapDepartment] = None
for dept_name in reversed(dn_values):
parent_dept = LdapDepartment(
name=dept_name,
parent=parent_dept,
is_group=is_group,
)
assert parent_dept is not None, "未从 dn 中提取到任何部门信息"
parent_dept.code = code
return parent_dept
class RDN(NamedTuple):
type: str
value: str
separator: str
def parse_dn_tree(dn: str, restrict_types: List[str] = None) -> List[RDN]:
restrict_types = [type_.upper() for type_ in (restrict_types or [])]
items = dn_utils.parse_dn(dn, escape=True)
if restrict_types:
parts = [RDN(*i) for i in items if i[0].upper() in restrict_types]
else:
parts = [RDN(*i) for i in items]
return parts
def parse_dn_value_list(dn: str, restrict_types: List[str] = None) -> List[str]:
tree = parse_dn_tree(dn, restrict_types)
parts = []
for part in tree:
parts.append(part.value)
return parts
| true | true |
f73afef58d65deabe13b39d87236bc23035e79be | 1,568 | py | Python | wrappers/python/tests/wallet/test_open_wallet.py | Diiaablo95/indy-sdk | 0ef7321f5902683af928cdc7ea94d522bee33d30 | [
"Apache-2.0"
] | null | null | null | wrappers/python/tests/wallet/test_open_wallet.py | Diiaablo95/indy-sdk | 0ef7321f5902683af928cdc7ea94d522bee33d30 | [
"Apache-2.0"
] | null | null | null | wrappers/python/tests/wallet/test_open_wallet.py | Diiaablo95/indy-sdk | 0ef7321f5902683af928cdc7ea94d522bee33d30 | [
"Apache-2.0"
] | null | null | null | import pytest
from indy import IndyError
from indy import wallet
from indy.error import ErrorCode
@pytest.mark.asyncio
@pytest.mark.parametrize("wallet_config", [None, '{"freshness_time":1000}'])
async def test_open_wallet_works(wallet_config, wallet_handle):
    # The wallet_handle fixture opens the wallet with each parametrized
    # config; reaching this body without an exception is the success case.
    pass
@pytest.mark.asyncio
async def test_open_wallet_works_for_not_created_wallet(credentials):
    # Opening a wallet that was never created must fail with WalletNotFoundError.
    with pytest.raises(IndyError) as e:
        await wallet.open_wallet('wallet_not_created', None, credentials)
    assert ErrorCode.WalletNotFoundError == e.value.error_code
@pytest.mark.asyncio
async def test_open_wallet_works_for_twice(wallet_name, wallet_handle, credentials):
    # wallet_handle already holds the wallet open; a second open must fail
    # with WalletAlreadyOpenedError.
    with pytest.raises(IndyError) as e:
        await wallet.open_wallet(wallet_name, None, credentials)
    assert ErrorCode.WalletAlreadyOpenedError == e.value.error_code
@pytest.mark.asyncio
async def test_open_wallet_works_for_missed_key(xwallet, wallet_name):
    # Credentials JSON without the required "key" field must be rejected
    # with WalletInputError.
    with pytest.raises(IndyError) as e:
        await wallet.open_wallet(wallet_name, None, "{}")
    assert ErrorCode.WalletInputError == e.value.error_code
@pytest.mark.asyncio
async def test_open_wallet_works_for_changing_credentials(pool_name):
    # Open with "rekey" rotates the wallet key; the wallet must then open
    # with the new key only.
    await wallet.create_wallet(pool_name, 'works_for_changing_credentials', None, None, '{"key":"key"}')
    handle = await wallet.open_wallet('works_for_changing_credentials', None, '{"key":"key", "rekey":"other_key"}')
    await wallet.close_wallet(handle)

    handle = await wallet.open_wallet('works_for_changing_credentials', None, '{"key":"other_key"}')
    await wallet.close_wallet(handle)
| 35.636364 | 115 | 0.776148 | import pytest
from indy import IndyError
from indy import wallet
from indy.error import ErrorCode
@pytest.mark.asyncio
@pytest.mark.parametrize("wallet_config", [None, '{"freshness_time":1000}'])
async def test_open_wallet_works(wallet_config, wallet_handle):
pass
@pytest.mark.asyncio
async def test_open_wallet_works_for_not_created_wallet(credentials):
with pytest.raises(IndyError) as e:
await wallet.open_wallet('wallet_not_created', None, credentials)
assert ErrorCode.WalletNotFoundError == e.value.error_code
@pytest.mark.asyncio
async def test_open_wallet_works_for_twice(wallet_name, wallet_handle, credentials):
with pytest.raises(IndyError) as e:
await wallet.open_wallet(wallet_name, None, credentials)
assert ErrorCode.WalletAlreadyOpenedError == e.value.error_code
@pytest.mark.asyncio
async def test_open_wallet_works_for_missed_key(xwallet, wallet_name):
with pytest.raises(IndyError) as e:
await wallet.open_wallet(wallet_name, None, "{}")
assert ErrorCode.WalletInputError == e.value.error_code
@pytest.mark.asyncio
async def test_open_wallet_works_for_changing_credentials(pool_name):
await wallet.create_wallet(pool_name, 'works_for_changing_credentials', None, None, '{"key":"key"}')
handle = await wallet.open_wallet('works_for_changing_credentials', None, '{"key":"key", "rekey":"other_key"}')
await wallet.close_wallet(handle)
handle = await wallet.open_wallet('works_for_changing_credentials', None, '{"key":"other_key"}')
await wallet.close_wallet(handle)
| true | true |
f73b00e0feedd153a750b316f06b75f9098f979e | 6,487 | py | Python | pygdf/buffer.py | mrocklin/pygdf | 2de9407427da9497ebdf8951a12857be0fab31bb | [
"Apache-2.0"
] | 5 | 2019-01-15T12:31:49.000Z | 2021-03-05T21:17:13.000Z | pygdf/buffer.py | mrocklin/pygdf | 2de9407427da9497ebdf8951a12857be0fab31bb | [
"Apache-2.0"
] | 1 | 2019-06-18T20:58:21.000Z | 2019-06-18T20:58:21.000Z | pygdf/buffer.py | mrocklin/pygdf | 2de9407427da9497ebdf8951a12857be0fab31bb | [
"Apache-2.0"
] | null | null | null |
import numpy as np
from numba import cuda
from . import cudautils, utils
from .serialize import register_distributed_serializer
class Buffer(object):
    """A 1D gpu buffer.

    Wraps a numba CUDA device array with a logical ``size`` (valid elements)
    and ``capacity`` (allocated elements), and supports dask.distributed
    serialization (optionally zero-copy via CUDA IPC).
    """
    # Cached CUDA IPC handle, reused across serialize() calls.
    _cached_ipch = None

    @classmethod
    def from_empty(cls, mem):
        """From empty device array
        """
        return cls(mem, size=0, capacity=mem.size)

    @classmethod
    def null(cls, dtype):
        """Create a "null" buffer with a zero-sized device array.
        """
        mem = cuda.device_array(0, dtype=dtype)
        return cls(mem, size=0, capacity=0)

    def __init__(self, mem, size=None, capacity=None, categorical=False):
        # mem: host or device array; copied/wrapped onto the device.
        # size: number of valid elements (defaults to all of mem).
        # capacity: allocated element count (defaults to size).
        # categorical: use len(mem) instead of mem.size for the default size.
        if size is None:
            if categorical:
                size = len(mem)
            else:
                size = mem.size
        if capacity is None:
            capacity = size
        self.mem = cudautils.to_device(mem)
        _BufferSentry(self.mem).ndim(1)

        self.size = size
        self.capacity = capacity
        self.dtype = self.mem.dtype

    def serialize(self, serialize, context=None):
        """Called when dask.distributed is performing a serialization on this
        object.

        Do not use this directly.  It is invoked by dask.distributed.

        Parameters
        ----------

        serialize : callable
             Used to serialize data that needs serialization .
        context : dict; optional
            If not ``None``, it contains information about the destination.

        Returns
        -------
        (header, frames)
            See custom serialization documentation in dask.distributed.
        """
        from .serialize import should_use_ipc

        # Use destination info to determine if we should do IPC.
        use_ipc = should_use_ipc(context)
        header = {}
        # Should use IPC transfer
        if use_ipc:
            # Reuse IPC handle from previous call?
            if self._cached_ipch is not None:
                ipch = self._cached_ipch
            else:
                # Get new IPC handle
                ipch = self.to_gpu_array().get_ipc_handle()
            header['kind'] = 'ipc'
            header['mem'], frames = serialize(ipch)
            # Keep IPC handle alive
            self._cached_ipch = ipch
        # Not using IPC transfer
        else:
            header['kind'] = 'normal'
            # Serialize the buffer as a numpy array
            header['mem'], frames = serialize(self.to_array())
        return header, frames

    @classmethod
    def deserialize(cls, deserialize, header, frames):
        """Called when dask.distributed is performing a deserialization for
        data of this class.

        Do not use this directly.  It is invoked by dask.distributed.

        Parameters
        ----------

        deserialize : callable
             Used to deserialize data that needs further deserialization .
        header, frames : dict
            See custom serialization documentation in dask.distributed.

        Returns
        -------
        obj : Buffer
            Returns an instance of Buffer.
        """
        # Using IPC?
        if header['kind'] == 'ipc':
            ipch = deserialize(header['mem'], frames)
            # Open IPC handle
            with ipch as data:
                # Copy remote data over
                mem = cuda.device_array_like(data)
                mem.copy_to_device(data)
        # Not using IPC
        else:
            # Deserialize the numpy array
            mem = deserialize(header['mem'], frames)
            mem.flags['WRITEABLE'] = True  # XXX: hack for numba to work
        return Buffer(mem)

    def __reduce__(self):
        """Pickle support: only the valid *size* elements are stored."""
        cpumem = self.to_array()
        # Note: pickled Buffer only stores *size* element.
        return type(self), (cpumem,)

    def __sizeof__(self):
        # Report the device allocation size in bytes.
        return int(self.mem.alloc_size)

    def __getitem__(self, arg):
        # Slice -> a new Buffer over the sliced device memory;
        # int -> a single scalar viewed as self.dtype.
        if isinstance(arg, slice):
            sliced = self.to_gpu_array()[arg]
            buf = Buffer(sliced)
            buf.dtype = self.dtype  # for np.datetime64 support
            return buf
        elif isinstance(arg, int):
            arg = utils.normalize_index(arg, self.size)
            # the dtype argument is necessary for datetime64 support
            # because currently we can't pass datetime64 types into
            # cuda dev arrays, so the type of the cuda dev array is
            # an i64, and we view it as the dtype on the buffer
            return self.mem[arg].view(self.dtype)
        else:
            raise NotImplementedError(type(arg))

    @property
    def avail_space(self):
        # Remaining element slots before capacity is exhausted.
        return self.capacity - self.size

    def _sentry_capacity(self, size_needed):
        # Raise MemoryError if fewer than size_needed slots remain.
        if size_needed > self.avail_space:
            raise MemoryError('insufficient space in buffer')

    def append(self, element):
        """Append a single element (must fit within capacity)."""
        self._sentry_capacity(1)
        self.extend(np.asarray(element, dtype=self.dtype))

    def extend(self, array):
        """Append *array*'s elements, casting to this buffer's dtype."""
        needed = array.size
        self._sentry_capacity(needed)
        array = cudautils.astype(array, dtype=self.dtype)
        self.mem[self.size:].copy_to_device(array)
        self.size += needed

    def astype(self, dtype):
        """Return self if dtype matches, else a converted copy."""
        if self.dtype == dtype:
            return self
        else:
            return Buffer(cudautils.astype(self.mem, dtype=dtype))

    def to_array(self):
        """Copy the valid elements to a host numpy array."""
        return self.to_gpu_array().copy_to_host()

    def to_gpu_array(self):
        """Return a device-array view of the valid elements (no copy)."""
        return self.mem[:self.size]

    def copy(self):
        """Deep copy the buffer
        """
        return Buffer(mem=cudautils.copy_array(self.mem),
                      size=self.size, capacity=self.capacity)

    def as_contiguous(self):
        """Return a C-contiguous copy of this buffer."""
        out = Buffer(mem=cudautils.as_contiguous(self.mem),
                     size=self.size, capacity=self.capacity)
        assert out.is_contiguous()
        return out

    def is_contiguous(self):
        """Whether the underlying device memory is C-contiguous."""
        return self.mem.is_c_contiguous()
class BufferSentryError(ValueError):
    """Raised when a device buffer fails a _BufferSentry validation check."""
    pass
class _BufferSentry(object):
    """Chainable validator for device-array invariants.

    Each passing check returns ``self`` so calls can be chained; a failing
    check raises :class:`BufferSentryError`.
    """

    def __init__(self, buf):
        self._buf = buf

    def dtype(self, dtype):
        """Require the wrapped buffer to have exactly *dtype*."""
        if dtype != self._buf.dtype:
            raise BufferSentryError('dtype mismatch')
        return self

    def ndim(self, ndim):
        """Require the wrapped buffer to have exactly *ndim* dimensions."""
        if ndim != self._buf.ndim:
            raise BufferSentryError('ndim mismatch')
        return self

    def contig(self):
        """Require the wrapped buffer to be C-contiguous."""
        if not self._buf.is_c_contiguous():
            raise BufferSentryError('non contiguous')
register_distributed_serializer(Buffer)
| 30.313084 | 77 | 0.586712 |
import numpy as np
from numba import cuda
from . import cudautils, utils
from .serialize import register_distributed_serializer
class Buffer(object):
_cached_ipch = None
@classmethod
def from_empty(cls, mem):
return cls(mem, size=0, capacity=mem.size)
@classmethod
def null(cls, dtype):
mem = cuda.device_array(0, dtype=dtype)
return cls(mem, size=0, capacity=0)
def __init__(self, mem, size=None, capacity=None, categorical=False):
if size is None:
if categorical:
size = len(mem)
else:
size = mem.size
if capacity is None:
capacity = size
self.mem = cudautils.to_device(mem)
_BufferSentry(self.mem).ndim(1)
self.size = size
self.capacity = capacity
self.dtype = self.mem.dtype
def serialize(self, serialize, context=None):
from .serialize import should_use_ipc
use_ipc = should_use_ipc(context)
header = {}
if use_ipc:
if self._cached_ipch is not None:
ipch = self._cached_ipch
else:
ipch = self.to_gpu_array().get_ipc_handle()
header['kind'] = 'ipc'
header['mem'], frames = serialize(ipch)
self._cached_ipch = ipch
else:
header['kind'] = 'normal'
header['mem'], frames = serialize(self.to_array())
return header, frames
@classmethod
def deserialize(cls, deserialize, header, frames):
if header['kind'] == 'ipc':
ipch = deserialize(header['mem'], frames)
with ipch as data:
mem = cuda.device_array_like(data)
mem.copy_to_device(data)
else:
mem = deserialize(header['mem'], frames)
mem.flags['WRITEABLE'] = True
return Buffer(mem)
def __reduce__(self):
cpumem = self.to_array()
return type(self), (cpumem,)
def __sizeof__(self):
return int(self.mem.alloc_size)
def __getitem__(self, arg):
if isinstance(arg, slice):
sliced = self.to_gpu_array()[arg]
buf = Buffer(sliced)
buf.dtype = self.dtype
return buf
elif isinstance(arg, int):
arg = utils.normalize_index(arg, self.size)
# cuda dev arrays, so the type of the cuda dev array is
# an i64, and we view it as the dtype on the buffer
return self.mem[arg].view(self.dtype)
else:
raise NotImplementedError(type(arg))
@property
def avail_space(self):
return self.capacity - self.size
def _sentry_capacity(self, size_needed):
if size_needed > self.avail_space:
raise MemoryError('insufficient space in buffer')
def append(self, element):
self._sentry_capacity(1)
self.extend(np.asarray(element, dtype=self.dtype))
def extend(self, array):
needed = array.size
self._sentry_capacity(needed)
array = cudautils.astype(array, dtype=self.dtype)
self.mem[self.size:].copy_to_device(array)
self.size += needed
def astype(self, dtype):
if self.dtype == dtype:
return self
else:
return Buffer(cudautils.astype(self.mem, dtype=dtype))
def to_array(self):
return self.to_gpu_array().copy_to_host()
def to_gpu_array(self):
return self.mem[:self.size]
def copy(self):
return Buffer(mem=cudautils.copy_array(self.mem),
size=self.size, capacity=self.capacity)
def as_contiguous(self):
out = Buffer(mem=cudautils.as_contiguous(self.mem),
size=self.size, capacity=self.capacity)
assert out.is_contiguous()
return out
def is_contiguous(self):
return self.mem.is_c_contiguous()
class BufferSentryError(ValueError):
pass
class _BufferSentry(object):
def __init__(self, buf):
self._buf = buf
def dtype(self, dtype):
if self._buf.dtype != dtype:
raise BufferSentryError('dtype mismatch')
return self
def ndim(self, ndim):
if self._buf.ndim != ndim:
raise BufferSentryError('ndim mismatch')
return self
def contig(self):
if not self._buf.is_c_contiguous():
raise BufferSentryError('non contiguous')
register_distributed_serializer(Buffer)
| true | true |
f73b01975ebb13cdcf0df9646b4dbb50b9335e5a | 51 | py | Python | plugins/holland.backup.pgdump/holland/backup/pgdump/__init__.py | a5a351e7/holland | 58a12a5ce10206eed9434ab42b02217de29784bb | [
"BSD-3-Clause"
] | 1 | 2019-06-06T01:07:34.000Z | 2019-06-06T01:07:34.000Z | plugins/holland.backup.pgdump/holland/backup/pgdump/__init__.py | a5a351e7/holland | 58a12a5ce10206eed9434ab42b02217de29784bb | [
"BSD-3-Clause"
] | null | null | null | plugins/holland.backup.pgdump/holland/backup/pgdump/__init__.py | a5a351e7/holland | 58a12a5ce10206eed9434ab42b02217de29784bb | [
"BSD-3-Clause"
] | 2 | 2015-12-04T12:17:59.000Z | 2022-03-23T07:22:02.000Z | from holland.backup.pgdump.interface import PgDump
| 25.5 | 50 | 0.862745 | from holland.backup.pgdump.interface import PgDump
| true | true |
f73b0279e5d187de3ecac7a5ffeabbd5fe32d864 | 7,205 | py | Python | ResNet-50/SVHN/train.py | GuanlinLee/FPD-for-Adversarial-Robustness | 76b06cb8a68469f8ed4ed6bb5479ee86719175fb | [
"MIT"
] | 11 | 2020-03-16T06:07:14.000Z | 2021-09-08T11:51:23.000Z | ResNet-50/SVHN/train.py | GuanlinLee/FPD-for-Adversarial-Robustness | 76b06cb8a68469f8ed4ed6bb5479ee86719175fb | [
"MIT"
] | 2 | 2020-08-25T06:03:50.000Z | 2021-07-08T03:47:09.000Z | ResNet-50/SVHN/train.py | GuanlinLee/FPD-for-Adversarial-Robustness | 76b06cb8a68469f8ed4ed6bb5479ee86719175fb | [
"MIT"
] | 5 | 2020-03-16T12:29:21.000Z | 2021-12-13T07:32:30.000Z | import torchvision
import torchvision.transforms as transforms
import torch
import torch.utils.data
import resnet
from torch.autograd import Variable
from torch import nn
import early_stop
from tqdm import tqdm
import os,sys
import numpy as np
# Pin training to a single GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# Global step counters used for periodic checkpointing / progress display.
train_globa_step=0
val_globa_step=0
# Hyper-parameters: weight decay, learning rate, epoch and batch budget.
wd=1e-50
learning_rate=1e-4
epochs=100
batch_size=300
# Let cuDNN benchmark kernels for the fixed 64x64 input size.
torch.backends.cudnn.benchmark = True
# SVHN train split, resized to 64x64 to match the network input.
transform=transforms.Compose([
	torchvision.transforms.Resize((64,64)),
	torchvision.transforms.ToTensor(),
])
trainset = torchvision.datasets.SVHN(root='./data',split='train', download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                          shuffle=True, num_workers=16)
# SVHN test split with the same resize (no augmentation).
transform_test=transforms.Compose([torchvision.transforms.Resize((64,64)),
	transforms.ToTensor(),
])
testset = torchvision.datasets.SVHN(root='./data', split='test', download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                         shuffle=False, num_workers=16)
# Model under training (note: resnet101 from the local resnet module,
# despite the ResNet-50 directory name).
n = resnet.resnet101().cuda()
# Split parameters into bias / weight lists (collected but not used below).
weight_p, bias_p = [],[]
for name, p in n.named_parameters():
	if 'bias' in name:
		bias_p += [p]
	else:
		weight_p += [p]
# Identify the reconstruction head (trans_conv) and classifier (group2)
# parameters so each gets its own optimizer param group.
trans_params = list(map(id, n.trans_conv.parameters()))
class_params = list(map(id, n.group2.parameters()))
base_params = filter(lambda p: id(p) not in trans_params,
                     n.parameters())
base_params = filter(lambda p: id(p) not in class_params,
                     base_params)
# loss1: reconstruction loss (MSE); loss2: classification loss (CE).
loss1 =nn.MSELoss()
loss1.cuda()
loss2=nn.CrossEntropyLoss()
loss2.cuda()
# `optimizer` updates backbone + decoder + classifier; `opt` excludes the
# classifier (used while the reconstruction loss is still large).
optimizer = torch.optim.Adam([{'params': base_params},
                              {'params':n.trans_conv.parameters(),'lr':learning_rate},
                              {'params':n.group2.parameters(),'lr':learning_rate}],
                             lr=learning_rate,weight_decay=wd)
opt = torch.optim.Adam([{'params': base_params},
                        {'params':n.trans_conv.parameters(),'lr':learning_rate}],
                       lr=learning_rate,weight_decay=wd)
# Resume model and both optimizer states from the best checkpoint, if any.
if os.path.exists('bestmodel_params.pkl'):
	checkpoint = torch.load('bestmodel_params.pkl')
	n.load_state_dict(checkpoint['state_dict'])
	optimizer.load_state_dict(checkpoint['opt_state_dict'])
	opt.load_state_dict(checkpoint['opt_state_dict2'])
# LR decays x0.1 when validation loss plateaus; early stopping tracks the
# (maximized) validation accuracy.
sch=torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,factor=0.1,patience=10)
es=early_stop.EarlyStopping('max',patience=20)
for epoch in range(epochs):
	loadertrain = tqdm(trainloader, desc='{} E{:03d}'.format('train', epoch), ncols=0)
	loadertest = tqdm(testloader, desc='{} E{:03d}'.format('test', epoch), ncols=0)
	epoch_loss = 0.0
	correct=0.0
	total=0.0
	total2=0.0
	correct2=0.0
	for x_train, y_train in loadertrain:
		n.train()
		x_train, y_train = Variable(x_train.cuda()),Variable(y_train.cuda())
		# Add small uniform noise in [-0.01, 0.01] to the inputs.
		x_noise=torch.FloatTensor(x_train.size(0),3,64,64).uniform_(-0.01,0.01)
		x_noise=torch.clamp(x_noise,-0.01,0.01)
		x_train_noise=x_train+Variable(x_noise.cuda())
		# Forward pass yields (reconstruction, class logits).
		y_pre,c_pre = n(x_train_noise)
		y_pre=y_pre.cuda()
		n.zero_grad()
		optimizer.zero_grad()
		# Reconstruction loss between the decoded image and the clean input.
		loss = loss1(torch.mul(y_pre,1.0), torch.mul( x_train,1.0))
		if loss.item()>3:
			# Reconstruction still poor: train only backbone + decoder
			# (`opt`, which excludes the classifier head).
			loss.backward(retain_graph=True)
			torch.nn.utils.clip_grad_norm_(n.parameters(), 5.0)
			opt.step()
			epoch_loss += loss.data.item()
			_, predicted = torch.max(c_pre.data, 1)
			total += y_train.size(0)
			correct += predicted.eq(y_train.data).cuda().sum()
			torch.cuda.empty_cache()
		else:
			# Joint objective: reconstruction + classification.
			loss_cl=loss2(c_pre,y_train)
			loss_sum=torch.mul(loss,1/1)+loss_cl
			loss_sum.backward(retain_graph=True)
			torch.nn.utils.clip_grad_norm_(n.parameters(), 5.0)
			optimizer.step()
			epoch_loss += loss_sum.data.item()
			_, predicted = torch.max(c_pre.data, 1)
			total += y_train.size(0)
			correct += predicted.eq(y_train.data).cuda().sum()
		train_globa_step+=1
		torch.cuda.empty_cache()
		if loss.item()<3:
			# Second pass: feed the reconstruction back through the network
			# and optimize on its reconstruction + classification losses.
			y_pre2, c_pre2 = n(y_pre)
			y_pre2 = y_pre2.cuda()
			n.zero_grad()
			optimizer.zero_grad()
			lossreg2 = loss1(torch.mul(y_pre2, 1.0), torch.mul( x_train, 1.0))
			loss_cl2 = loss2(c_pre2, y_train)
			_, predicted2 = torch.max(c_pre2.data, 1)
			total2 += y_train.size(0)
			correct2 += predicted2.eq(y_train.data).cuda().sum()
			loss_sum2 = torch.mul(lossreg2, 1 / 1) + loss_cl2
			loss_sum2.backward()
			torch.nn.utils.clip_grad_norm_(n.parameters(), 5.0)
			optimizer.step()
			torch.cuda.empty_cache()
		if train_globa_step% 20==0:
			# Periodic checkpoint (model + both optimizer states).
			n.eval()
			checkpoint = {
				'state_dict': n.state_dict(),
				'opt_state_dict': optimizer.state_dict(),
				'opt_state_dict2':opt.state_dict(),
				'epoch': epoch
			}
			torch.save(checkpoint, 'model_params.pkl')
		fmt = '{:.4f}'.format
		loadertrain.set_postfix(loss=fmt(loss.data.item()),
								acc=fmt(correct.item() / total * 100))
	if (epoch) % 1 ==0:
		# Validation pass on clean test images.
		test_loss = 0.0
		correct = 0.0
		total = 0.0
		n.eval()
		with torch.no_grad():
			for x_test, y_test in loadertest:
				x_test, y_test = Variable(x_test.cuda()), Variable(y_test.cuda())
				y_pre, c_pre = n(x_test)
				y_pre = y_pre.cuda()
				loss_cl = loss2(c_pre, y_test)
				loss = loss1(torch.mul(y_pre,1.0), torch.mul( x_test,1.0))
				loss_sum = torch.mul(loss,1/1) + loss_cl
				test_loss += loss_sum.data.item()
				_, predicted = torch.max(c_pre.data, 1)
				total += y_test.size(0)
				correct += predicted.eq(y_test.data).cuda().sum()
				val_globa_step+=1
				fmt = '{:.4f}'.format
				loadertest.set_postfix(loss=fmt(loss_sum.data.item()),
									   acc=fmt(correct.item() / total * 100))
		# Plateau LR schedule on validation loss; early stopping on accuracy
		# (es.step also persists the best model via the optimizers/epoch).
		sch.step(test_loss)
		fl=es.step(correct.item()/total*100, n,optimizer,opt,epoch)
		if fl:
			torch.cuda.empty_cache()
			sys.exit(0)
torch.cuda.empty_cache() | 37.526042 | 105 | 0.54018 | import torchvision
# SVHN training script for a ResNet-101 variant with two heads:
#   - a reconstruction head (`trans_conv`, trained with MSE loss `loss1`)
#   - a classification head (`group2`, trained with cross-entropy `loss2`)
# Warm-up: while the reconstruction loss is large (> 3) only the
# reconstruction objective is optimised via `opt`; afterwards the joint
# objective is optimised via `optimizer`, plus a second pass that feeds
# the reconstruction back through the network.
#
# Fixes vs. the original:
#   * `import torchvision` restored (it had been severed from the import
#     block; `torchvision.transforms.Resize` below needs the package name).
#   * `base_params` is materialized into a list: the original kept it as a
#     lazy `filter(...)` iterator, which the first `torch.optim.Adam`
#     consumed, leaving the second optimizer an empty parameter group.
import torchvision
import torchvision.transforms as transforms
import torch
import torch.utils.data
import resnet                      # project-local model definition
from torch.autograd import Variable
from torch import nn
import early_stop                  # project-local early-stopping helper
from tqdm import tqdm
import os, sys
import numpy as np

os.environ["CUDA_VISIBLE_DEVICES"] = "2"

# global step counters and hyper-parameters
train_globa_step = 0
val_globa_step = 0
wd = 1e-50                         # weight decay (effectively disabled)
learning_rate = 1e-4
epochs = 100
batch_size = 300
torch.backends.cudnn.benchmark = True

transform = transforms.Compose([
    torchvision.transforms.Resize((64, 64)),
    torchvision.transforms.ToTensor(),
])
trainset = torchvision.datasets.SVHN(root='./data', split='train', download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                          shuffle=True, num_workers=16)
transform_test = transforms.Compose([
    torchvision.transforms.Resize((64, 64)),
    transforms.ToTensor(),
])
testset = torchvision.datasets.SVHN(root='./data', split='test', download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                         shuffle=False, num_workers=16)

n = resnet.resnet101().cuda()

# split parameters into weight/bias groups (collected but not used below)
weight_p, bias_p = [], []
for name, p in n.named_parameters():
    if 'bias' in name:
        bias_p += [p]
    else:
        weight_p += [p]

# backbone parameters = everything outside the two heads.
# NOTE: must be a list (not a lazy filter) because it is handed to BOTH
# optimizers below; an iterator would be exhausted by the first one.
trans_params = list(map(id, n.trans_conv.parameters()))
class_params = list(map(id, n.group2.parameters()))
base_params = [p for p in n.parameters()
               if id(p) not in trans_params and id(p) not in class_params]

loss1 = nn.MSELoss()
loss1.cuda()
loss2 = nn.CrossEntropyLoss()
loss2.cuda()

# joint optimizer (backbone + both heads) ...
optimizer = torch.optim.Adam([{'params': base_params},
                              {'params': n.trans_conv.parameters(), 'lr': learning_rate},
                              {'params': n.group2.parameters(), 'lr': learning_rate}],
                             lr=learning_rate, weight_decay=wd)
# ... and a reconstruction-only optimizer used during warm-up
opt = torch.optim.Adam([{'params': base_params},
                        {'params': n.trans_conv.parameters(), 'lr': learning_rate}],
                       lr=learning_rate, weight_decay=wd)

# resume from the best checkpoint when one exists
if os.path.exists('bestmodel_params.pkl'):
    checkpoint = torch.load('bestmodel_params.pkl')
    n.load_state_dict(checkpoint['state_dict'])
    optimizer.load_state_dict(checkpoint['opt_state_dict'])
    opt.load_state_dict(checkpoint['opt_state_dict2'])

sch = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=10)
es = early_stop.EarlyStopping('max', patience=20)

for epoch in range(epochs):
    loadertrain = tqdm(trainloader, desc='{} E{:03d}'.format('train', epoch), ncols=0)
    loadertest = tqdm(testloader, desc='{} E{:03d}'.format('test', epoch), ncols=0)
    epoch_loss = 0.0
    correct = 0.0
    total = 0.0
    total2 = 0.0
    correct2 = 0.0
    for x_train, y_train in loadertrain:
        n.train()
        x_train, y_train = Variable(x_train.cuda()), Variable(y_train.cuda())
        # perturb the input with small clamped uniform noise
        x_noise = torch.FloatTensor(x_train.size(0), 3, 64, 64).uniform_(-0.01, 0.01)
        x_noise = torch.clamp(x_noise, -0.01, 0.01)
        x_train_noise = x_train + Variable(x_noise.cuda())
        y_pre, c_pre = n(x_train_noise)
        y_pre = y_pre.cuda()
        n.zero_grad()
        optimizer.zero_grad()
        loss = loss1(torch.mul(y_pre, 1.0), torch.mul(x_train, 1.0))
        if loss.item() > 3:
            # warm-up phase: optimise reconstruction only
            loss.backward(retain_graph=True)
            torch.nn.utils.clip_grad_norm_(n.parameters(), 5.0)
            opt.step()
            epoch_loss += loss.data.item()
            _, predicted = torch.max(c_pre.data, 1)
            total += y_train.size(0)
            correct += predicted.eq(y_train.data).cuda().sum()
            torch.cuda.empty_cache()
        else:
            # joint phase: reconstruction + classification
            loss_cl = loss2(c_pre, y_train)
            loss_sum = torch.mul(loss, 1 / 1) + loss_cl
            loss_sum.backward(retain_graph=True)
            torch.nn.utils.clip_grad_norm_(n.parameters(), 5.0)
            optimizer.step()
            epoch_loss += loss_sum.data.item()
            _, predicted = torch.max(c_pre.data, 1)
            total += y_train.size(0)
            correct += predicted.eq(y_train.data).cuda().sum()
            train_globa_step += 1
            torch.cuda.empty_cache()
        if loss.item() < 3:
            # second pass: feed the reconstruction back through the network
            # and optimise the joint objective on it as well
            y_pre2, c_pre2 = n(y_pre)
            y_pre2 = y_pre2.cuda()
            n.zero_grad()
            optimizer.zero_grad()
            lossreg2 = loss1(torch.mul(y_pre2, 1.0), torch.mul(x_train, 1.0))
            loss_cl2 = loss2(c_pre2, y_train)
            _, predicted2 = torch.max(c_pre2.data, 1)
            total2 += y_train.size(0)
            correct2 += predicted2.eq(y_train.data).cuda().sum()
            loss_sum2 = torch.mul(lossreg2, 1 / 1) + loss_cl2
            loss_sum2.backward()
            torch.nn.utils.clip_grad_norm_(n.parameters(), 5.0)
            optimizer.step()
            torch.cuda.empty_cache()
        if train_globa_step % 20 == 0:
            # periodic checkpoint of model + both optimizer states
            n.eval()
            checkpoint = {
                'state_dict': n.state_dict(),
                'opt_state_dict': optimizer.state_dict(),
                'opt_state_dict2': opt.state_dict(),
                'epoch': epoch
            }
            torch.save(checkpoint, 'model_params.pkl')
        fmt = '{:.4f}'.format
        loadertrain.set_postfix(loss=fmt(loss.data.item()),
                                acc=fmt(correct.item() / total * 100))
    if (epoch) % 1 == 0:
        # evaluate on the test split every epoch
        test_loss = 0.0
        correct = 0.0
        total = 0.0
        n.eval()
        with torch.no_grad():
            for x_test, y_test in loadertest:
                x_test, y_test = Variable(x_test.cuda()), Variable(y_test.cuda())
                y_pre, c_pre = n(x_test)
                y_pre = y_pre.cuda()
                loss_cl = loss2(c_pre, y_test)
                loss = loss1(torch.mul(y_pre, 1.0), torch.mul(x_test, 1.0))
                loss_sum = torch.mul(loss, 1 / 1) + loss_cl
                test_loss += loss_sum.data.item()
                _, predicted = torch.max(c_pre.data, 1)
                total += y_test.size(0)
                correct += predicted.eq(y_test.data).cuda().sum()
                val_globa_step += 1
                fmt = '{:.4f}'.format
                loadertest.set_postfix(loss=fmt(loss_sum.data.item()),
                                       acc=fmt(correct.item() / total * 100))
        sch.step(test_loss)
        # early stopping on test accuracy; es.step saves the best model
        fl = es.step(correct.item() / total * 100, n, optimizer, opt, epoch)
        if fl:
            torch.cuda.empty_cache()
            sys.exit(0)
        torch.cuda.empty_cache()
f73b046b13174e6adabf622521b086d561673104 | 4,362 | py | Python | splunktaucclib/rest_handler/endpoint/__init__.py | splunk/addonfactory-ucc-library | 0c020fc46a38d17fb50614d7bcce840d7ec6af45 | [
"Apache-2.0"
] | 7 | 2021-03-18T23:46:26.000Z | 2021-11-16T13:49:00.000Z | splunktaucclib/rest_handler/endpoint/__init__.py | splunk/addonfactory-ucc-library | 0c020fc46a38d17fb50614d7bcce840d7ec6af45 | [
"Apache-2.0"
] | 103 | 2020-10-21T06:18:29.000Z | 2022-02-14T16:54:47.000Z | splunktaucclib/rest_handler/endpoint/__init__.py | splunk/addonfactory-ucc-library | 0c020fc46a38d17fb50614d7bcce840d7ec6af45 | [
"Apache-2.0"
] | 4 | 2021-03-22T19:39:33.000Z | 2022-03-27T16:52:11.000Z | #
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ..error import RestError
from ..util import get_base_app_name
__all__ = [
"RestModel",
"RestEndpoint",
"SingleModel",
"MultipleModel",
"DataInputModel",
]
class RestModel:
    """Plain container describing one REST model.

    :param fields: list of field objects making up the model.
    :param name: optional model name (used as the lookup key by
        ``MultipleModel``); ``None`` for single-model endpoints.
    """

    def __init__(self, fields, name=None):
        self.fields = fields
        self.name = name
class RestEndpoint:
    """Abstract base for a REST endpoint.

    Binds a Splunk user/app context and delegates field-level
    validate/encode/decode to the ``RestModel`` resolved by ``model()``.
    """

    def __init__(self, user="nobody", app=None, *args, **kwargs):
        """
        :param user: Splunk user namespace.
        :param app: app namespace; when falsy the base app name is used.
        :param args: extra positional arguments retained for subclasses.
        :param kwargs: extra keyword arguments retained for subclasses.
        """
        self.user = user
        self.app = app if app else get_base_app_name()
        self.args = args
        self.kwargs = kwargs
        # Subclasses set this to True when a GET must reload the conf first.
        self.need_reload = False

    @property
    def internal_endpoint(self):
        """Endpoint path of the backing Splunk internal service."""
        raise NotImplementedError()

    def model(self, name):
        """Return the RestModel registered for ``name``."""
        raise NotImplementedError()

    def _loop_fields(self, meth, name, data, *args, **kwargs):
        # Apply method ``meth`` on every field of the resolved model.
        results = []
        for field in self.model(name).fields:
            results.append(getattr(field, meth)(data, *args, **kwargs))
        return results

    def validate(self, name, data, existing=None):
        self._loop_fields("validate", name, data, existing=existing)

    def encode(self, name, data):
        self._loop_fields("encode", name, data)

    def decode(self, name, data):
        self._loop_fields("decode", name, data)
class SingleModel(RestEndpoint):
    """Single-mode REST endpoint.

    Every stanza stored in the conf file shares one ``RestModel`` format.
    """

    def __init__(self, conf_name, model, user="nobody", app=None, *args, **kwargs):
        """
        :param conf_name: conf file name (without the ``.conf`` suffix).
        :param model: the RestModel shared by all stanzas.
        :type model: RestModel
        """
        super().__init__(user=user, app=app, *args, **kwargs)
        # conf-backed endpoints are reloaded before servicing a GET
        self.need_reload = True
        self._model = model
        self.conf_name = conf_name
        self.config_name = kwargs.get("config_name")

    @property
    def internal_endpoint(self):
        return "configs/conf-{}".format(self.conf_name)

    def model(self, name):
        # One model regardless of stanza name.
        return self._model
class MultipleModel(RestEndpoint):
    """Multi-mode REST endpoint.

    One conf file holds stanzas of differing formats, each described by
    its own named ``RestModel``.
    """

    def __init__(self, conf_name, models, user="nobody", app=None, *args, **kwargs):
        """
        :param conf_name: conf file name.
        :type conf_name: basestring
        :param models: RestModel instances, indexed here by their ``name``.
        :type models: list
        """
        super().__init__(user=user, app=app, *args, **kwargs)
        self.need_reload = True
        self.conf_name = conf_name
        # build the name -> model lookup used by model()
        self.models = {}
        for rest_model in models:
            self.models[rest_model.name] = rest_model

    @property
    def internal_endpoint(self):
        return "configs/conf-{}".format(self.conf_name)

    def model(self, name):
        try:
            return self.models[name]
        except KeyError:
            # surface an unknown stanza name as a REST 404
            raise RestError(404, "name=%s" % name)
class DataInputModel(RestEndpoint):
    """REST endpoint backed by a Splunk data input of a fixed type."""

    def __init__(self, input_type, model, user="nobody", app=None, *args, **kwargs):
        """
        :param input_type: data input type segment of the endpoint path.
        :param model: the single RestModel used for every entity.
        """
        super().__init__(user=user, app=app, *args, **kwargs)
        self.input_type = input_type
        self._model = model

    @property
    def internal_endpoint(self):
        return "data/inputs/{}".format(self.input_type)

    def model(self, name):
        # A data input endpoint always uses its one configured model.
        return self._model
| 25.068966 | 84 | 0.607978 |
from ..error import RestError
from ..util import get_base_app_name
__all__ = [
"RestModel",
"RestEndpoint",
"SingleModel",
"MultipleModel",
"DataInputModel",
]
class RestModel:
def __init__(self, fields, name=None):
self.name = name
self.fields = fields
class RestEndpoint:
def __init__(self, user="nobody", app=None, *args, **kwargs):
self.user = user
self.app = app or get_base_app_name()
self.args = args
self.kwargs = kwargs
self.need_reload = False
@property
def internal_endpoint(self):
raise NotImplementedError()
def model(self, name):
raise NotImplementedError()
def _loop_fields(self, meth, name, data, *args, **kwargs):
model = self.model(name)
return [getattr(f, meth)(data, *args, **kwargs) for f in model.fields]
def validate(self, name, data, existing=None):
self._loop_fields("validate", name, data, existing=existing)
def encode(self, name, data):
self._loop_fields("encode", name, data)
def decode(self, name, data):
self._loop_fields("decode", name, data)
class SingleModel(RestEndpoint):
def __init__(self, conf_name, model, user="nobody", app=None, *args, **kwargs):
super().__init__(user=user, app=app, *args, **kwargs)
self.need_reload = True
self._model = model
self.conf_name = conf_name
self.config_name = kwargs.get("config_name")
@property
def internal_endpoint(self):
return "configs/conf-{}".format(self.conf_name)
def model(self, name):
return self._model
class MultipleModel(RestEndpoint):
def __init__(self, conf_name, models, user="nobody", app=None, *args, **kwargs):
super().__init__(user=user, app=app, *args, **kwargs)
self.need_reload = True
self.conf_name = conf_name
self.models = {model.name: model for model in models}
@property
def internal_endpoint(self):
return "configs/conf-{}".format(self.conf_name)
def model(self, name):
try:
return self.models[name]
except KeyError:
raise RestError(404, "name=%s" % name)
class DataInputModel(RestEndpoint):
def __init__(self, input_type, model, user="nobody", app=None, *args, **kwargs):
super().__init__(user=user, app=app, *args, **kwargs)
self.input_type = input_type
self._model = model
@property
def internal_endpoint(self):
return "data/inputs/{}".format(self.input_type)
def model(self, name):
return self._model
| true | true |
f73b04b712d5a2f1aec0e5f6bc372f9b99185b6b | 442 | py | Python | wsknn/utils/meta.py | nokaut/wsknn | dd0f0c17d0e212fb47fe6ee283cb90b9d6a5ba25 | [
"BSD-3-Clause"
] | 1 | 2022-03-24T11:48:18.000Z | 2022-03-24T11:48:18.000Z | wsknn/utils/meta.py | nokaut/wsknn | dd0f0c17d0e212fb47fe6ee283cb90b9d6a5ba25 | [
"BSD-3-Clause"
] | 1 | 2022-03-29T08:53:59.000Z | 2022-03-29T08:53:59.000Z | wsknn/utils/meta.py | nokaut/wsknn | dd0f0c17d0e212fb47fe6ee283cb90b9d6a5ba25 | [
"BSD-3-Clause"
] | null | null | null | import yaml
def parse_settings(settings_file: str) -> dict:
    """Load model settings from a YAML file.

    Parameters
    ----------
    settings_file : str
        Path to the YAML settings file.

    Returns
    -------
    dict
        Parsed settings used for modeling.
    """
    with open(settings_file, 'r') as yaml_stream:
        parsed_settings = yaml.safe_load(yaml_stream)
    return parsed_settings
| 19.217391 | 66 | 0.58371 | import yaml
def parse_settings(settings_file: str) -> dict:
with open(settings_file, 'r') as fstream:
ydict = yaml.safe_load(fstream)
return ydict
| true | true |
f73b05b0700c62b63ea3a42beaae70fc2169843b | 1,989 | py | Python | lec1_step3.py | hirowgit/2B3_python_owl_logic_database_course | 81096b287c32a067aa11a9a37ae5a4c6a0d1301e | [
"MIT"
] | 1 | 2020-06-04T23:54:14.000Z | 2020-06-04T23:54:14.000Z | lec1_step3.py | hirowgit/2B3_python_owl_logic_database_course | 81096b287c32a067aa11a9a37ae5a4c6a0d1301e | [
"MIT"
] | null | null | null | lec1_step3.py | hirowgit/2B3_python_owl_logic_database_course | 81096b287c32a067aa11a9a37ae5a4c6a0d1301e | [
"MIT"
] | 1 | 2020-10-14T04:20:44.000Z | 2020-10-14T04:20:44.000Z | #!/usr/bin/env python
# coding: utf-8
# In[1]:
## Python basics for novice data scientists, supported by Wagatsuma Lab@Kyutech
#
# The MIT License (MIT): Copyright (c) 2020 Hiroaki Wagatsuma and Wagatsuma Lab@Kyutech
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
#
# # @Time : 2020-4-20
# # @Author : Hiroaki Wagatsuma
# # @Site : https://github.com/hirowgit/2A_python_basic_course
# # @IDE : Python 3.7.7 (default, Mar 10 2020, 15:43:27) [Clang 10.0.0 (clang-1000.11.45.5)] on darwin
# # @File : lec1_step3.py
# In[2]:
# running without modules on mathematics
pi
# In[3]:
# module test: if you have an error when you run this code, you need to check the installation status of those modules
import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# In[4]:
import math
pi=math.pi
print(pi)
# In[5]:
x = np.arange(-3.14, 3.14, 0.1)
y = np.sin(x)
plt.plot(x, y)
# In[6]:
s = pd.Series([2, 4, 6, np.nan, 7, 9])
print(s)
# In[ ]:
| 29.25 | 465 | 0.726998 | true | true | |
f73b06530db80bbe588f75d2af60bc804b0df596 | 22,815 | py | Python | uhd_restpy/testplatform/sessions/ixnetwork/topology/pceperosubobjectslist_7ea27079d1a1d53cebc6e1e83b2ca0b4.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 20 | 2019-05-07T01:59:14.000Z | 2022-02-11T05:24:47.000Z | uhd_restpy/testplatform/sessions/ixnetwork/topology/pceperosubobjectslist_7ea27079d1a1d53cebc6e1e83b2ca0b4.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 60 | 2019-04-03T18:59:35.000Z | 2022-02-22T12:05:05.000Z | uhd_restpy/testplatform/sessions/ixnetwork/topology/pceperosubobjectslist_7ea27079d1a1d53cebc6e1e83b2ca0b4.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 13 | 2019-05-20T10:48:31.000Z | 2021-10-06T07:45:44.000Z | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class PcepEroSubObjectsList(Base):
"""
The PcepEroSubObjectsList class encapsulates a list of pcepEroSubObjectsList resources that are managed by the system.
A list of resources can be retrieved from the server using the PcepEroSubObjectsList.find() method.
"""
__slots__ = ()
_SDM_NAME = 'pcepEroSubObjectsList'
_SDM_ATT_MAP = {
'Active': 'active',
'AsNumber': 'asNumber',
'Bos': 'bos',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'FBit': 'fBit',
'Ipv4NodeId': 'ipv4NodeId',
'Ipv4Prefix': 'ipv4Prefix',
'Ipv6NodeId': 'ipv6NodeId',
'Ipv6Prefix': 'ipv6Prefix',
'LocalInterfaceId': 'localInterfaceId',
'LocalIpv4Address': 'localIpv4Address',
'LocalIpv6Address': 'localIpv6Address',
'LocalNodeId': 'localNodeId',
'LooseHop': 'looseHop',
'MplsLabel': 'mplsLabel',
'NaiType': 'naiType',
'Name': 'name',
'PrefixLength': 'prefixLength',
'RemoteInterfaceId': 'remoteInterfaceId',
'RemoteIpv4Address': 'remoteIpv4Address',
'RemoteIpv6Address': 'remoteIpv6Address',
'RemoteNodeId': 'remoteNodeId',
'Sid': 'sid',
'SidType': 'sidType',
'Srv6FunctionCode': 'srv6FunctionCode',
'Srv6Identifier': 'srv6Identifier',
'Srv6NaiType': 'srv6NaiType',
'SubObjectType': 'subObjectType',
'Tc': 'tc',
'Ttl': 'ttl',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(PcepEroSubObjectsList, self).__init__(parent, list_op)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Controls whether the ERO sub-object will be sent in the PCInitiate message.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def AsNumber(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): AS Number
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AsNumber']))
@property
def Bos(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This bit is set to true for the last entry in the label stack i.e., for the bottom of the stack, and false for all other label stack entries. This control will be editable only if SID Type is MPLS Label 32bit.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Bos']))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def FBit(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): A Flag which is used to carry additional information pertaining to SID. When this bit is set, the NAI value in the subobject body is null.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FBit']))
@property
def Ipv4NodeId(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv4 Node ID is specified as an IPv4 address. This control can be configured if NAI Type is set to IPv4 Node ID and F bit is disabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4NodeId']))
@property
def Ipv4Prefix(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv4 Prefix is specified as an IPv4 address.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4Prefix']))
@property
def Ipv6NodeId(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv6 Node ID is specified as an IPv6 address. This control can be configured if NAI Type is set to IPv6 Node ID and F bit is disabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6NodeId']))
@property
def Ipv6Prefix(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IPv6 Prefix is specified as an IPv6 address.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6Prefix']))
@property
def LocalInterfaceId(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This is the Local Interface ID of the Unnumbered Adjacency with IPv4 NodeIDs which is specified as a pair of Node ID / Interface ID tuples. This Control can be configured if NAI Type is set to Unnumbered Adjacency with IPv4 NodeIDs and F bit is disabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalInterfaceId']))
@property
def LocalIpv4Address(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This Control can be configured if NAI Type is set to IPv4 Adjacency and F bit is disabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalIpv4Address']))
@property
def LocalIpv6Address(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This Control can be configured if NAI Type is set to IPv6 Adjacency and F bit is disabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalIpv6Address']))
@property
def LocalNodeId(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This is the Local Node ID of the Unnumbered Adjacency with IPv4 NodeIDs which is specified as a pair of Node ID / Interface ID tuples. This Control can be configured if NAI Type is set to Unnumbered Adjacency with IPv4 NodeIDs and F bit is disabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalNodeId']))
@property
def LooseHop(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Indicates if user wants to represent a loose-hop sub object in the LSP
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LooseHop']))
@property
def MplsLabel(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This control will be editable if the SID Type is set to either 20bit or 32bit MPLS-Label. This field will take the 20bit value of the MPLS-Label
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MplsLabel']))
@property
def NaiType(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): NAI (Node or Adjacency Identifier) contains the NAI associated with the SID. Depending on the value of SID Type, the NAI can have different formats such as, Not Applicable IPv4 Node ID IPv6 Node ID IPv4 Adjacency IPv6 Adjacency Unnumbered Adjacency with IPv4 NodeIDs
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NaiType']))
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def PrefixLength(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Prefix Length
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PrefixLength']))
@property
def RemoteInterfaceId(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This is the Remote Interface ID of the Unnumbered Adjacency with IPv4 NodeIDs which is specified as a pair of Node ID / Interface ID tuples. This Control can be configured if NAI Type is set to Unnumbered Adjacency with IPv4 NodeIDs and F bit is disabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteInterfaceId']))
@property
def RemoteIpv4Address(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This Control can be configured if NAI Type is set to IPv4 Adjacency and F bit is disabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteIpv4Address']))
@property
def RemoteIpv6Address(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This Control can be configured if NAI Type is set to IPv6 Adjacency and F bit is disabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteIpv6Address']))
@property
def RemoteNodeId(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This is the Remote Node ID of the Unnumbered Adjacency with IPv4 NodeIDs which is specified as a pair of Node ID / Interface ID tuples. This Control can be configured if NAI Type is set to Unnumbered Adjacency with IPv4 NodeIDs and F bit is disabled.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteNodeId']))
@property
def Sid(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): SID is the Segment Identifier
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Sid']))
@property
def SidType(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Using the Segment Identifier Type control user can configure whether to include SID or not and if included what is its type. Types are as follows: Null SID 20bit MPLS Label 32bit MPLS Label. If it is Null then S bit is set in the packet. Default value is 20bit MPLS Label.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SidType']))
@property
def Srv6FunctionCode(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Function Code is is the 16 bit field representing supported functions associated with SRv6 SIDs. This information is optional and included only for maintainability. Following function codes are currently defined - 0: Reserved 1: End Function 2: End.DX6 Function 3: End.DT6 Function 4: End.X Function
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6FunctionCode']))
@property
def Srv6Identifier(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): SRv6 Identifier is the 128 bit IPv6 addresses representing SRv6 segment.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6Identifier']))
@property
def Srv6NaiType(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): The SRv6 NAI Type which indicates the interpretation for NAI (Node or Adjacency Identifier).
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6NaiType']))
@property
def SubObjectType(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Using the Sub Object Type control user can configure which sub object needs to be included from the following options: Not Applicable IPv4 Prefix IPv6 Prefix AS Number.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubObjectType']))
@property
def Tc(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This field is used to carry traffic class information. This control will be editable only if SID Type is MPLS Label 32bit.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tc']))
@property
def Ttl(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): This field is used to encode a time-to-live value. This control will be editable only if SID Type is MPLS Label 32bit.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ttl']))
def update(self, Name=None):
# type: (str) -> PcepEroSubObjectsList
"""Updates pcepEroSubObjectsList resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, Name=None):
# type: (str) -> PcepEroSubObjectsList
"""Adds a new pcepEroSubObjectsList resource on the json, only valid with config assistant
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
Returns
-------
- self: This instance with all currently retrieved pcepEroSubObjectsList resources using find and the newly added pcepEroSubObjectsList resources available through an iterator or index
Raises
------
- Exception: if this function is not being used with config assistance
"""
return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
    def find(self, Count=None, DescriptiveName=None, Name=None):
        # type: (int, str, str) -> PcepEroSubObjectsList
        """Finds and retrieves pcepEroSubObjectsList resources from the server.
        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve pcepEroSubObjectsList resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all pcepEroSubObjectsList resources from the server.
        Args
        ----
        - Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
        - DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
        - Name (str): Name of NGPF element, guaranteed to be unique in Scenario
        Returns
        -------
        - self: This instance with matching pcepEroSubObjectsList resources retrieved from the server available through an iterator or index
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # ``locals()`` bundles the filter arguments for _map_locals; do not
        # introduce local variables before this call.
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of pcepEroSubObjectsList data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the pcepEroSubObjectsList resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
    def get_device_ids(self, PortNames=None, Active=None, AsNumber=None, Bos=None, FBit=None, Ipv4NodeId=None, Ipv4Prefix=None, Ipv6NodeId=None, Ipv6Prefix=None, LocalInterfaceId=None, LocalIpv4Address=None, LocalIpv6Address=None, LocalNodeId=None, LooseHop=None, MplsLabel=None, NaiType=None, PrefixLength=None, RemoteInterfaceId=None, RemoteIpv4Address=None, RemoteIpv6Address=None, RemoteNodeId=None, Sid=None, SidType=None, Srv6FunctionCode=None, Srv6Identifier=None, Srv6NaiType=None, SubObjectType=None, Tc=None, Ttl=None):
        """Base class infrastructure that gets a list of pcepEroSubObjectsList device ids encapsulated by this object.
        Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
        Args
        ----
        - PortNames (str): optional regex of port names
        - Active (str): optional regex of active
        - AsNumber (str): optional regex of asNumber
        - Bos (str): optional regex of bos
        - FBit (str): optional regex of fBit
        - Ipv4NodeId (str): optional regex of ipv4NodeId
        - Ipv4Prefix (str): optional regex of ipv4Prefix
        - Ipv6NodeId (str): optional regex of ipv6NodeId
        - Ipv6Prefix (str): optional regex of ipv6Prefix
        - LocalInterfaceId (str): optional regex of localInterfaceId
        - LocalIpv4Address (str): optional regex of localIpv4Address
        - LocalIpv6Address (str): optional regex of localIpv6Address
        - LocalNodeId (str): optional regex of localNodeId
        - LooseHop (str): optional regex of looseHop
        - MplsLabel (str): optional regex of mplsLabel
        - NaiType (str): optional regex of naiType
        - PrefixLength (str): optional regex of prefixLength
        - RemoteInterfaceId (str): optional regex of remoteInterfaceId
        - RemoteIpv4Address (str): optional regex of remoteIpv4Address
        - RemoteIpv6Address (str): optional regex of remoteIpv6Address
        - RemoteNodeId (str): optional regex of remoteNodeId
        - Sid (str): optional regex of sid
        - SidType (str): optional regex of sidType
        - Srv6FunctionCode (str): optional regex of srv6FunctionCode
        - Srv6Identifier (str): optional regex of srv6Identifier
        - Srv6NaiType (str): optional regex of srv6NaiType
        - SubObjectType (str): optional regex of subObjectType
        - Tc (str): optional regex of tc
        - Ttl (str): optional regex of ttl
        Returns
        -------
        - list(int): A list of device ids that meets the regex criteria provided in the method parameters
        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # ``locals()`` passes every regex argument through wholesale; adding
        # any local variable before this call would leak it into the query.
        return self._get_ngpf_device_ids(locals())
| 42.328386 | 529 | 0.652509 |
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class PcepEroSubObjectsList(Base):
    """Generated NGPF wrapper for the ``pcepEroSubObjectsList`` SDM node.

    Each property returns a Multivalue accessor for the corresponding
    server-side attribute; CRUD is provided by update/add/find/read.
    """
    __slots__ = ()
    _SDM_NAME = 'pcepEroSubObjectsList'
    # Maps Python attribute names to their REST/SDM attribute names.
    _SDM_ATT_MAP = {
        'Active': 'active',
        'AsNumber': 'asNumber',
        'Bos': 'bos',
        'Count': 'count',
        'DescriptiveName': 'descriptiveName',
        'FBit': 'fBit',
        'Ipv4NodeId': 'ipv4NodeId',
        'Ipv4Prefix': 'ipv4Prefix',
        'Ipv6NodeId': 'ipv6NodeId',
        'Ipv6Prefix': 'ipv6Prefix',
        'LocalInterfaceId': 'localInterfaceId',
        'LocalIpv4Address': 'localIpv4Address',
        'LocalIpv6Address': 'localIpv6Address',
        'LocalNodeId': 'localNodeId',
        'LooseHop': 'looseHop',
        'MplsLabel': 'mplsLabel',
        'NaiType': 'naiType',
        'Name': 'name',
        'PrefixLength': 'prefixLength',
        'RemoteInterfaceId': 'remoteInterfaceId',
        'RemoteIpv4Address': 'remoteIpv4Address',
        'RemoteIpv6Address': 'remoteIpv6Address',
        'RemoteNodeId': 'remoteNodeId',
        'Sid': 'sid',
        'SidType': 'sidType',
        'Srv6FunctionCode': 'srv6FunctionCode',
        'Srv6Identifier': 'srv6Identifier',
        'Srv6NaiType': 'srv6NaiType',
        'SubObjectType': 'subObjectType',
        'Tc': 'tc',
        'Ttl': 'ttl',
    }
    # No enum-valued attributes on this node.
    _SDM_ENUM_MAP = {
    }
    def __init__(self, parent, list_op=False):
        super(PcepEroSubObjectsList, self).__init__(parent, list_op)
    # --- Multivalue attribute accessors (generated boilerplate) ---
    @property
    def Active(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
    @property
    def AsNumber(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AsNumber']))
    @property
    def Bos(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Bos']))
    @property
    def Count(self):
        # Plain (non-Multivalue) read-only attribute.
        return self._get_attribute(self._SDM_ATT_MAP['Count'])
    @property
    def DescriptiveName(self):
        # Plain (non-Multivalue) read-only attribute.
        return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
    @property
    def FBit(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FBit']))
    @property
    def Ipv4NodeId(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4NodeId']))
    @property
    def Ipv4Prefix(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv4Prefix']))
    @property
    def Ipv6NodeId(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6NodeId']))
    @property
    def Ipv6Prefix(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ipv6Prefix']))
    @property
    def LocalInterfaceId(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalInterfaceId']))
    @property
    def LocalIpv4Address(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalIpv4Address']))
    @property
    def LocalIpv6Address(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalIpv6Address']))
    @property
    def LocalNodeId(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LocalNodeId']))
    @property
    def LooseHop(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LooseHop']))
    @property
    def MplsLabel(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['MplsLabel']))
    @property
    def NaiType(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NaiType']))
    @property
    def Name(self):
        # Name is read/write (see setter below), unlike the Multivalue attrs.
        return self._get_attribute(self._SDM_ATT_MAP['Name'])
    @Name.setter
    def Name(self, value):
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)
    @property
    def PrefixLength(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PrefixLength']))
    @property
    def RemoteInterfaceId(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteInterfaceId']))
    @property
    def RemoteIpv4Address(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteIpv4Address']))
    @property
    def RemoteIpv6Address(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteIpv6Address']))
    @property
    def RemoteNodeId(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RemoteNodeId']))
    @property
    def Sid(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Sid']))
    @property
    def SidType(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SidType']))
    @property
    def Srv6FunctionCode(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6FunctionCode']))
    @property
    def Srv6Identifier(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6Identifier']))
    @property
    def Srv6NaiType(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Srv6NaiType']))
    @property
    def SubObjectType(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SubObjectType']))
    @property
    def Tc(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Tc']))
    @property
    def Ttl(self):
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Ttl']))
    def update(self, Name=None):
        """Update this pcepEroSubObjectsList resource on the server.

        ``locals()`` captures the named arguments for _map_locals.
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
    def add(self, Name=None):
        """Add a new pcepEroSubObjectsList resource (config-assistant only)."""
        return self._add_xpath(self._map_locals(self._SDM_ATT_MAP, locals()))
    def find(self, Count=None, DescriptiveName=None, Name=None):
        """Find pcepEroSubObjectsList resources; parameters are server-side regexes."""
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
    def read(self, href):
        """Retrieve the single pcepEroSubObjectsList instance at *href*."""
        return self._read(href)
    def get_device_ids(self, PortNames=None, Active=None, AsNumber=None, Bos=None, FBit=None, Ipv4NodeId=None, Ipv4Prefix=None, Ipv6NodeId=None, Ipv6Prefix=None, LocalInterfaceId=None, LocalIpv4Address=None, LocalIpv6Address=None, LocalNodeId=None, LooseHop=None, MplsLabel=None, NaiType=None, PrefixLength=None, RemoteInterfaceId=None, RemoteIpv4Address=None, RemoteIpv6Address=None, RemoteNodeId=None, Sid=None, SidType=None, Srv6FunctionCode=None, Srv6Identifier=None, Srv6NaiType=None, SubObjectType=None, Tc=None, Ttl=None):
        """Return device ids matching the optional regex parameters.

        ``locals()`` passes every argument through wholesale.
        """
        return self._get_ngpf_device_ids(locals())
| true | true |
f73b07c81484deadd67f88b7cda2aebdf3fd7843 | 2,734 | py | Python | vader/vader-web.py | cherdt/vader-webform | 16caa59e9f86134652f164b7795d8d3a41b30725 | [
"MIT"
] | null | null | null | vader/vader-web.py | cherdt/vader-webform | 16caa59e9f86134652f164b7795d8d3a41b30725 | [
"MIT"
] | 3 | 2020-03-20T16:53:30.000Z | 2020-03-21T22:43:18.000Z | vader/vader-web.py | cherdt/vader-webform | 16caa59e9f86134652f164b7795d8d3a41b30725 | [
"MIT"
] | null | null | null | import json
import nltk.data
from flask import Flask
from flask import escape
from flask import Markup
from flask import render_template
from flask import request
#from flask import send_static_file
from flask_cors import CORS
from nltk.sentiment.vader import SentimentIntensityAnalyzer
app = Flask(__name__)
CORS(app)
splitter = nltk.data.load('tokenizers/punkt/english.pickle')
sid = SentimentIntensityAnalyzer()
def score_text_and_summary(text):
    """Score *text* as a whole and per sentence.

    Arguments:
    text -- a block of text

    Returns a dict with 'sentences' (per-sentence scores) and
    'aggregate_score' (compound score of the entire text).
    """
    # Score the whole text first, then each sentence; key order matters
    # for the JSON output ('sentences' before 'aggregate_score').
    overall = score_text(text)
    result = {'sentences': score_sentences(text)}
    result['aggregate_score'] = overall['compound']
    return result
def score_sentences(text):
    """Given a block of text, return scores for each constituent sentence.

    Arguments:
    text -- a block of text
    """
    # One dict per sentence: HTML-escaped text plus its compound score.
    return [
        {'sentence': escape(sentence), 'score': score_text(sentence)['compound']}
        for sentence in splitter.tokenize(text)
    ]
def score_sentences_to_html(text):
    """Render each sentence of *text* as an HTML span colored by its score."""
    spans = []
    for item in score_sentences(text):
        score = item['score']
        if score > 0:
            css = 'positive'
        elif score < 0:
            css = 'negative'
        else:
            css = 'neutral'
        fragment = (Markup('<span class="' + css + '">')
                    + escape(item['sentence'])
                    + Markup('<sup>' + format(score, '.2f') + '</sup></span>'))
        spans.append(fragment)
    return Markup(' '.join(spans))
def score_text(text):
    """Return VADER polarity scores for *text*.

    Arguments:
    text -- some text ('#' characters are stripped before scoring)
    """
    stripped = text.replace('#', '')
    return sid.polarity_scores(stripped)
@app.route("/", methods=['GET', 'POST'])
def index():
input_text = ''
scored_text = ''
aggregate_score = ''
if request.form.get('input_text'):
input_text = request.form.get('input_text')
scored_text = score_sentences_to_html(input_text)
aggregate_score = str(score_text(input_text)['compound'])
return render_template('index.html',
input_text=input_text,
scored_text=scored_text,
aggregate_score=aggregate_score)
@app.route("/api")
def score_input():
text = request.args.get('text')
return json.dumps(score_text_and_summary(text))
if __name__ == '__main__':
    # Bind to all interfaces so the app is reachable from outside localhost.
    app.run(host='0.0.0.0')
| 25.551402 | 151 | 0.653987 | import json
import nltk.data
from flask import Flask
from flask import escape
from flask import Markup
from flask import render_template
from flask import request
from flask_cors import CORS
from nltk.sentiment.vader import SentimentIntensityAnalyzer
app = Flask(__name__)
CORS(app)
splitter = nltk.data.load('tokenizers/punkt/english.pickle')
sid = SentimentIntensityAnalyzer()
def score_text_and_summary(text):
    """Score *text* both in aggregate and per sentence.

    Returns a dict with 'sentences' (per-sentence scores) and
    'aggregate_score' (compound score of the whole text).
    """
    scored_values = {}
    scored_values['sentences'] = []
    scores = score_text(text)
    scored_values['aggregate_score'] = scores['compound']
    scored_values['sentences'] = score_sentences(text)
    return scored_values
def score_sentences(text):
    """Split *text* into sentences and score each one.

    Returns a list of {'sentence': <escaped text>, 'score': <compound score>}.
    """
    scored_sentences = []
    for sentence in splitter.tokenize(text):
        scores = score_text(sentence)
        scored_sentences.append({'sentence': escape(sentence), 'score': scores['compound']})
    return scored_sentences
def score_sentences_to_html(text):
    """Render each sentence of *text* as an HTML span colored by its score."""
    html = []
    scored_sentences = score_sentences(text)
    for item in scored_sentences:
        # CSS class reflects the sign of the compound score.
        css_class = 'neutral'
        if item['score'] > 0:
            css_class = 'positive'
        elif item['score'] < 0:
            css_class = 'negative'
        span = Markup('<span class="' + css_class + '">') + escape(item['sentence']) + Markup('<sup>' + format(item['score'], '.2f') + '</sup></span>')
        html.append(span)
    return Markup(' '.join(html))
def score_text(text):
    """Return VADER polarity scores for *text*; '#' characters are stripped first."""
    return sid.polarity_scores(text.replace('#', ''))
@app.route("/", methods=['GET', 'POST'])
def index():
input_text = ''
scored_text = ''
aggregate_score = ''
if request.form.get('input_text'):
input_text = request.form.get('input_text')
scored_text = score_sentences_to_html(input_text)
aggregate_score = str(score_text(input_text)['compound'])
return render_template('index.html',
input_text=input_text,
scored_text=scored_text,
aggregate_score=aggregate_score)
@app.route("/api")
def score_input():
text = request.args.get('text')
return json.dumps(score_text_and_summary(text))
if __name__ == '__main__':
    # Bind to all interfaces so the app is reachable from outside localhost.
    app.run(host='0.0.0.0')
| true | true |
f73b08ee4fee92e19f3eb5047268d576244f59e9 | 1,831 | py | Python | setup.py | WnP/pysass | ba9b949e2ee68f58bd4840a3ae91bd401ebe0253 | [
"MIT"
] | null | null | null | setup.py | WnP/pysass | ba9b949e2ee68f58bd4840a3ae91bd401ebe0253 | [
"MIT"
] | 1 | 2019-03-08T10:33:34.000Z | 2019-03-19T13:52:46.000Z | setup.py | WnP/pysass | ba9b949e2ee68f58bd4840a3ae91bd401ebe0253 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# read the contents of your README file
from os import path
from setuptools import setup
# Load the adjacent README so PyPI can render the long description.
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as readme:
    long_description = readme.read()
setup(
    name="pysass",
    description="Sass CLI Python: "
    "A wrapper to libsass-python with watchdog capability.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    version="0.1.0",
    py_modules=["pysass"],
    packages=[],
    package_data={},
    license="MIT License",
    author="Steeve Chailloux",
    author_email="steevechailloux" "@" "gmail.com",
    url="https://github.com/WnP/pysass/",
    entry_points={"console_scripts": [["pysass = pysass:main"]]},
    install_requires=["libsass", "watchdog"],
    extras_require={},
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: C",
        "Programming Language :: C++",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Programming Language :: Python :: Implementation :: Stackless",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Topic :: Software Development :: Code Generators",
        "Topic :: Software Development :: Compilers",
    ],
)
| 36.62 | 73 | 0.630257 |
from os import path
from setuptools import setup
# Read the adjacent README.md for use as the package's long description.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
    long_description = f.read()
setup(
    name="pysass",
    description="Sass CLI Python: "
    "A wrapper to libsass-python with watchdog capability.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    version="0.1.0",
    py_modules=["pysass"],
    packages=[],
    package_data={},
    license="MIT License",
    author="Steeve Chailloux",
    author_email="steevechailloux" "@" "gmail.com",
    url="https://github.com/WnP/pysass/",
    entry_points={"console_scripts": [["pysass = pysass:main"]]},
    install_requires=["libsass", "watchdog"],
    extras_require={},
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: C",
        "Programming Language :: C++",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Programming Language :: Python :: Implementation :: Stackless",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Topic :: Software Development :: Code Generators",
        "Topic :: Software Development :: Compilers",
    ],
)
| true | true |
f73b0a5cbdc2ead38b5e63e1f96b962ddb083520 | 3,792 | py | Python | shrdlu_blocks/demo.py | hosford42/SHRDLUBlocks | 8f847f2dd3a258318c418c15ebc6233827da9c27 | [
"MIT"
] | 1 | 2021-08-30T10:31:32.000Z | 2021-08-30T10:31:32.000Z | shrdlu_blocks/demo.py | hosford42/SHRDLUBlocks | 8f847f2dd3a258318c418c15ebc6233827da9c27 | [
"MIT"
] | null | null | null | shrdlu_blocks/demo.py | hosford42/SHRDLUBlocks | 8f847f2dd3a258318c418c15ebc6233827da9c27 | [
"MIT"
] | 1 | 2022-01-08T19:43:02.000Z | 2022-01-08T19:43:02.000Z | """
A simple demo of the environment.
Usage:
python3 -m shrdlu_blocks.demo
The environment will be displayed in a graphics window. The user can type
various commands into the graphics window to query the scene and control the
grasper. Type `help` to get a list of commands.
"""
import ast
import io
import logging
import traceback
import pygame.display
from shrdlu_blocks.control import Controller
from shrdlu_blocks.scenes import PhysicalObject
from shrdlu_blocks.typedefs import UnmetConditionError, ObjectID
from shrdlu_blocks.viewer import Viewer
__all__ = ['demo']
def demo_callback(controller: Controller, command: str) -> str:
    """Parse and execute one user command against *controller*.

    Returns the text to display in the viewer, or None when the command
    produced no output. ``exit`` quits the process; ``help`` lists the
    controller's public commands.
    """
    if command == 'exit':
        # Imported locally: the module-level ``import sys`` only runs under
        # the ``__main__`` guard, so it is unavailable when this module is
        # imported as a library (would raise NameError otherwise).
        import sys
        pygame.quit()
        sys.exit(0)
    output_buffer = io.StringIO()
    if command == 'help':
        print("Commands:", file=output_buffer)
        print("  help", file=output_buffer)
        print("  exit", file=output_buffer)
        for name in dir(controller):
            if not name.startswith('_'):
                print("  " + name, file=output_buffer)
        return output_buffer.getvalue() or None
    pieces = command.split()
    command = pieces.pop(0)
    if not command:
        return output_buffer.getvalue() or None
    # Only allow plain public controller attributes as commands.
    if '.' in command or command.startswith('_') or command not in dir(controller):
        print("ERROR: Invalid command", file=output_buffer)
        return output_buffer.getvalue() or None
    # Broad except is deliberate: this is a REPL boundary and any failure
    # should be shown to the user rather than crash the viewer.
    # noinspection PyBroadException
    try:
        # Parse each argument as a Python literal, falling back to a string.
        args = []
        for piece in pieces:
            try:
                arg = ast.literal_eval(piece)
            except ValueError:
                arg = piece
            args.append(arg)
        attribute = getattr(controller, command)
        if callable(attribute) or args:
            result = attribute(*args)
        else:
            result = attribute
        if result is None:
            pass
        elif isinstance(result, str) or not hasattr(result, '__iter__'):
            print(repr(result), file=output_buffer)
        else:
            object_count = len(list(controller.find_objects()))
            for item in result:
                if ('objects' in command and isinstance(item, int) and
                        0 <= item < object_count):
                    tags = dict(controller.iter_object_tags(ObjectID(item)))
                    # Cheat just a little by constructing a mock object with the tags so we can use
                    # the __str__() method it defines.
                    # noinspection PyTypeChecker
                    mock_obj = PhysicalObject(None, None, None, tags)
                    print(str(mock_obj), file=output_buffer)
                else:
                    print(repr(item), file=output_buffer)
    except UnmetConditionError as e:
        # Expected, user-facing precondition failures: show the message only.
        print(e, file=output_buffer)
    except Exception:
        traceback.print_exc(file=output_buffer)
    return output_buffer.getvalue() or None
def demo():
    """
    Let the user play around with the standard scene using programmatic
    instructions passed directly to the controller.
    The environment will be displayed in a graphics window. The user can type
    various commands into the graphics window to query the scene and control
    the grasper. Type `help` to get a list of commands.
    """
    pygame.init()
    screen_info = pygame.display.Info()
    screen_width = screen_info.current_w
    screen_height = screen_info.current_h
    # Open a window at half the desktop resolution.
    screen = pygame.display.set_mode((screen_width // 2, screen_height // 2))
    # The Viewer drives the event loop and routes typed commands to demo_callback.
    Viewer(screen, "SHRDLU Blocks Demo", demo_callback,
           initial_output='Type "help" for a list of available commands.').run()
if __name__ == '__main__':
    import sys
    # Log to stdout only when run as a script; library users configure logging.
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    demo()
| 34.472727 | 99 | 0.638449 |
import ast
import io
import logging
import traceback
import pygame.display
from shrdlu_blocks.control import Controller
from shrdlu_blocks.scenes import PhysicalObject
from shrdlu_blocks.typedefs import UnmetConditionError, ObjectID
from shrdlu_blocks.viewer import Viewer
__all__ = ['demo']
def demo_callback(controller: Controller, command: str) -> str:
    """Parse and execute one user command against *controller*.

    Returns the text to display, or None when there is no output.
    """
    if command == 'exit':
        # NOTE(review): ``sys`` is only imported under the __main__ guard at
        # module bottom — this branch would NameError if the module is
        # imported as a library; confirm intended usage.
        pygame.quit()
        sys.exit(0)
    output_buffer = io.StringIO()
    if command == 'help':
        print("Commands:", file=output_buffer)
        print("  help", file=output_buffer)
        print("  exit", file=output_buffer)
        for name in dir(controller):
            if not name.startswith('_'):
                print("  " + name, file=output_buffer)
        return output_buffer.getvalue() or None
    pieces = command.split()
    command = pieces.pop(0)
    if not command:
        return output_buffer.getvalue() or None
    # Only plain public controller attributes are valid commands.
    if '.' in command or command.startswith('_') or command not in dir(controller):
        print("ERROR: Invalid command", file=output_buffer)
        return output_buffer.getvalue() or None
    try:
        # Parse each argument as a Python literal, falling back to a string.
        args = []
        for piece in pieces:
            try:
                arg = ast.literal_eval(piece)
            except ValueError:
                arg = piece
            args.append(arg)
        attribute = getattr(controller, command)
        if callable(attribute) or args:
            result = attribute(*args)
        else:
            result = attribute
        if result is None:
            pass
        elif isinstance(result, str) or not hasattr(result, '__iter__'):
            print(repr(result), file=output_buffer)
        else:
            object_count = len(list(controller.find_objects()))
            for item in result:
                if ('objects' in command and isinstance(item, int) and
                        0 <= item < object_count):
                    tags = dict(controller.iter_object_tags(ObjectID(item)))
                    # Mock object built only to reuse PhysicalObject.__str__.
                    mock_obj = PhysicalObject(None, None, None, tags)
                    print(str(mock_obj), file=output_buffer)
                else:
                    print(repr(item), file=output_buffer)
    except UnmetConditionError as e:
        # Expected, user-facing precondition failure: show the message only.
        print(e, file=output_buffer)
    except Exception:
        # REPL boundary: show the full traceback to the user.
        traceback.print_exc(file=output_buffer)
    return output_buffer.getvalue() or None
def demo():
    """Open a half-resolution window and run the interactive demo viewer."""
    pygame.init()
    screen_info = pygame.display.Info()
    screen_width = screen_info.current_w
    screen_height = screen_info.current_h
    screen = pygame.display.set_mode((screen_width // 2, screen_height // 2))
    Viewer(screen, "SHRDLU Blocks Demo", demo_callback,
           initial_output='Type "help" for a list of available commands.').run()
if __name__ == '__main__':
    import sys
    # Log to stdout only when run as a script.
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    demo()
| true | true |
f73b0a84dd68da3e5ac508e5df1cfc14043f82d0 | 866 | py | Python | mp4box/parsing/mdhd.py | abhijeetbhagat/mp4box | 841ff0ef70c7f5a96548f47414bba69c00aa2f5e | [
"BSD-3-Clause"
] | 7 | 2019-08-14T03:03:51.000Z | 2021-11-14T19:10:00.000Z | mp4box/parsing/mdhd.py | wanyhamo/mp4box | c5c73cd37c01bd9d637f1f3ed82221065dc86d6f | [
"BSD-3-Clause"
] | 10 | 2019-08-03T16:27:08.000Z | 2019-09-10T10:05:23.000Z | mp4box/parsing/mdhd.py | abhijeetbhagat/mp4box | 841ff0ef70c7f5a96548f47414bba69c00aa2f5e | [
"BSD-3-Clause"
] | 7 | 2019-08-19T17:58:03.000Z | 2021-03-03T07:25:54.000Z | from mp4box.box import MediaHeaderBox
def parse_mdhd(reader, my_size):
version = reader.read32()
box = MediaHeaderBox(my_size, version, 0)
if version == 0:
box.creation_time = reader.read32()
box.modification_time = reader.read32()
box.timescale = reader.read32()
box.duration = reader.read32()
else:
box.creation_time = reader.read64()
box.modification_time = reader.read64()
box.timescale = reader.read32()
box.duration = reader.read64()
data = reader.readn_as_int(2)
box.pad = (data >> 15) & 1
language = data & 0x7FFF
box.language = (
chr(97 + (language >> 10) - 1 % 97)
+ chr(97 + (language >> 5 & 0x1F) - 1 % 97)
+ chr(97 + (language & 0x1F) - 1 % 97)
)
box.predefined = reader.read16()
return box
| 30.928571 | 52 | 0.572748 | from mp4box.box import MediaHeaderBox
def parse_mdhd(reader, my_size):
version = reader.read32()
box = MediaHeaderBox(my_size, version, 0)
if version == 0:
box.creation_time = reader.read32()
box.modification_time = reader.read32()
box.timescale = reader.read32()
box.duration = reader.read32()
else:
box.creation_time = reader.read64()
box.modification_time = reader.read64()
box.timescale = reader.read32()
box.duration = reader.read64()
data = reader.readn_as_int(2)
box.pad = (data >> 15) & 1
language = data & 0x7FFF
box.language = (
chr(97 + (language >> 10) - 1 % 97)
+ chr(97 + (language >> 5 & 0x1F) - 1 % 97)
+ chr(97 + (language & 0x1F) - 1 % 97)
)
box.predefined = reader.read16()
return box
| true | true |
f73b0aea61f411d235153b75fb33290b07c86cb8 | 7,176 | py | Python | azure-mgmt-web/azure/mgmt/web/operations/deleted_web_apps_operations.py | azuresdkci1x/azure-sdk-for-python-1722 | e08fa6606543ce0f35b93133dbb78490f8e6bcc9 | [
"MIT"
] | 2 | 2020-07-29T14:22:17.000Z | 2020-11-06T18:47:40.000Z | azure-mgmt-web/azure/mgmt/web/operations/deleted_web_apps_operations.py | azuresdkci1x/azure-sdk-for-python-1722 | e08fa6606543ce0f35b93133dbb78490f8e6bcc9 | [
"MIT"
] | 1 | 2016-08-01T07:37:04.000Z | 2016-08-01T07:37:04.000Z | azure-mgmt-web/azure/mgmt/web/operations/deleted_web_apps_operations.py | azuresdkci1x/azure-sdk-for-python-1722 | e08fa6606543ce0f35b93133dbb78490f8e6bcc9 | [
"MIT"
] | 1 | 2018-11-09T06:17:41.000Z | 2018-11-09T06:17:41.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class DeletedWebAppsOperations(object):
    """DeletedWebAppsOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: API Version. Constant value: "2016-03-01".
    """
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self.api_version = "2016-03-01"
        self.config = config
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Get all deleted apps for a subscription.
        Get all deleted apps for a subscription.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`DeletedSitePaged
         <azure.mgmt.web.models.DeletedSitePaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Closure invoked by DeletedSitePaged to fetch each page; on the
        # first call it builds the collection URL, afterwards it follows
        # the server-supplied next_link verbatim.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/deletedSites'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            # NOTE(review): generated client issues POST here (vs GET in
            # list_by_resource_group) — confirm against the 2016-03-01 swagger
            # before changing.
            request = self._client.post(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.DeletedSitePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.DeletedSitePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def list_by_resource_group(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets deleted web apps in subscription.
        Gets deleted web apps in subscription.
        :param resource_group_name: Name of the resource group to which the
         resource belongs.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`DeletedSitePaged
         <azure.mgmt.web.models.DeletedSitePaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Page-fetching closure; see ``list`` above for the paging pattern.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/deletedSites'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+[^\.]$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.DeletedSitePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.DeletedSitePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
| 41.964912 | 173 | 0.627926 |
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class DeletedWebAppsOperations(object):
    """Operations over deleted web apps (Microsoft.Web, API 2016-03-01).

    Auto-generated-style client: each operation builds the request URL,
    query string and headers, sends it through the shared service client
    and wraps the result in a lazily-fetched paged collection.
    """
    def __init__(self, client, config, serializer, deserializer):
        # client: service client used to build and send HTTP requests.
        # serializer/deserializer: msrest (de)serialization helpers.
        # config: client configuration (subscription id, languages, ...).
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Sent as the 'api-version' query parameter on every request.
        self.api_version = "2016-03-01"
        self.config = config
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Return deleted web apps in the subscription as a paged collection.

        :param dict custom_headers: extra headers merged into each request.
        :param bool raw: if True, return the raw paged wrapper instead.
        :raises CloudError: when the service answers with a non-200 status.
        """
        def internal_paging(next_link=None, raw=False):
            # First page: build the URL from the route template; subsequent
            # pages follow the service-provided next_link verbatim.
            if not next_link:
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Web/deletedSites'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Headers shared by every page request.
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # NOTE(review): this operation is sent as POST while the
            # by-resource-group variant below uses GET -- confirm against
            # the service specification.
            request = self._client.post(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # DeletedSitePaged pulls pages lazily through internal_paging.
        deserialized = models.DeletedSitePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.DeletedSitePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    def list_by_resource_group(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Return deleted web apps in one resource group as a paged collection.

        :param str resource_group_name: name of the resource group.
        :param dict custom_headers: extra headers merged into each request.
        :param bool raw: if True, return the raw paged wrapper instead.
        :raises CloudError: when the service answers with a non-200 status.
        """
        def internal_paging(next_link=None, raw=False):
            # Same first-page/next-link pattern as in list() above.
            if not next_link:
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/deletedSites'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern='^[-\w\._\(\)]+[^\.]$'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        deserialized = models.DeletedSitePaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.DeletedSitePaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
| true | true |
f73b0b1daf4e0dc9eeaea3df01a84fbe271845de | 2,654 | py | Python | var/spack/repos/builtin/packages/gnutls/package.py | SimeonEhrig/spack | 7fe0230492ecf0e497a84d578ea163570cf460eb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/gnutls/package.py | SimeonEhrig/spack | 7fe0230492ecf0e497a84d578ea163570cf460eb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/gnutls/package.py | SimeonEhrig/spack | 7fe0230492ecf0e497a84d578ea163570cf460eb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | null | null | null | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Gnutls(AutotoolsPackage):
    """GnuTLS is a secure communications library implementing the SSL, TLS
    and DTLS protocols and technologies around them. It provides a simple C
    language application programming interface (API) to access the secure
    communications protocols as well as APIs to parse and write X.509, PKCS
    #12, OpenPGP and other required structures. It is aimed to be portable
    and efficient with focus on security and interoperability."""
    homepage = "http://www.gnutls.org"
    url = "https://www.gnupg.org/ftp/gcrypt/gnutls/v3.5/gnutls-3.5.19.tar.xz"
    version('3.5.19', sha256='1936eb64f03aaefd6eb16cef0567457777618573826b94d03376bb6a4afadc44')
    version('3.5.13', '4fd41ad86572933c2379b4cc321a0959')
    version('3.5.10', '336c03a71ba90184ffd0388075dde504')
    version('3.5.9', '0ab25eb6a1509345dd085bc21a387951')
    version('3.3.9', 'ff61b77e39d09f1140ab5a9cf52c58b6')
    variant('zlib', default=True, description='Enable zlib compression support')
    # Note that version 3.3.9 of gnutls doesn't support nettle 3.0.
    depends_on('nettle@:2.9', when='@3.3.9')
    depends_on('nettle', when='@3.5:')
    depends_on('libidn2@:2.0.99', when='@:3.5.99')
    depends_on('zlib', when='+zlib')
    depends_on('gettext')
    depends_on('pkgconfig', type='build')
    # Build out of the source tree in a dedicated directory.
    build_directory = 'spack-build'
    def url_for_version(self, version):
        """Download URL: releases are grouped in per-minor-series
        directories (e.g. v3.5/gnutls-3.5.19.tar.xz)."""
        url = "https://www.gnupg.org/ftp/gcrypt/gnutls/v{0}/gnutls-{1}.tar.xz"
        return url.format(version.up_to(2), version)
    def configure_args(self):
        """Arguments passed to ./configure for the selected spec."""
        spec = self.spec
        args = [
            '--enable-static',
        ]
        if spec.satisfies('@3.5:'):
            # use shipped libraries, might be turned into variants
            args.append('--with-included-libtasn1')
            args.append('--with-included-unistring')
            args.append('--without-p11-kit')  # p11-kit@0.23.1: ...
        if '+zlib' in spec:
            args.append('--with-zlib')
        else:
            args.append('--without-zlib')
        # The (expensive) test suite is only enabled when Spack itself
        # was asked to run tests for this build.
        if self.run_tests:
            args.extend([
                '--enable-tests',
                '--enable-valgrind-tests',
                '--enable-full-test-suite',
            ])
        else:
            args.extend([
                '--disable-tests',
                '--disable-valgrind-tests',
                '--disable-full-test-suite',
            ])
        return args
| 35.864865 | 96 | 0.625094 |
from spack import *
class Gnutls(AutotoolsPackage):
    """Spack package recipe for GnuTLS, a secure communications library
    implementing the SSL/TLS/DTLS protocols."""
    homepage = "http://www.gnutls.org"
    url = "https://www.gnupg.org/ftp/gcrypt/gnutls/v3.5/gnutls-3.5.19.tar.xz"
    version('3.5.19', sha256='1936eb64f03aaefd6eb16cef0567457777618573826b94d03376bb6a4afadc44')
    version('3.5.13', '4fd41ad86572933c2379b4cc321a0959')
    version('3.5.10', '336c03a71ba90184ffd0388075dde504')
    version('3.5.9', '0ab25eb6a1509345dd085bc21a387951')
    version('3.3.9', 'ff61b77e39d09f1140ab5a9cf52c58b6')
    variant('zlib', default=True, description='Enable zlib compression support')
    # gnutls 3.3.9 is pinned to nettle < 3.0; newer releases are not.
    depends_on('nettle@:2.9', when='@3.3.9')
    depends_on('nettle', when='@3.5:')
    depends_on('libidn2@:2.0.99', when='@:3.5.99')
    depends_on('zlib', when='+zlib')
    depends_on('gettext')
    depends_on('pkgconfig', type='build')
    # Build out of the source tree in a dedicated directory.
    build_directory = 'spack-build'
    def url_for_version(self, version):
        """Download URL: releases live in per-minor-series directories."""
        url = "https://www.gnupg.org/ftp/gcrypt/gnutls/v{0}/gnutls-{1}.tar.xz"
        return url.format(version.up_to(2), version)
    def configure_args(self):
        """Arguments passed to ./configure for the selected spec."""
        spec = self.spec
        args = [
            '--enable-static',
        ]
        if spec.satisfies('@3.5:'):
            # use shipped libraries, might be turned into variants
            args.append('--with-included-libtasn1')
            args.append('--with-included-unistring')
            args.append('--without-p11-kit')  # p11-kit@0.23.1: ...
        if '+zlib' in spec:
            args.append('--with-zlib')
        else:
            args.append('--without-zlib')
        # Enable the full test suite only when Spack runs build tests.
        if self.run_tests:
            args.extend([
                '--enable-tests',
                '--enable-valgrind-tests',
                '--enable-full-test-suite',
            ])
        else:
            args.extend([
                '--disable-tests',
                '--disable-valgrind-tests',
                '--disable-full-test-suite',
            ])
        return args
| true | true |
f73b0c922f4cce44c12d31dcb1e553205a57ad87 | 591 | py | Python | sig-backend/api/routes/beach_routes.py | antonioalfa22/sig-playas-asturias | 3cc087d44a0dc7cdc932adbc9e877a4b53fcff93 | [
"MIT"
] | 1 | 2020-12-01T19:53:14.000Z | 2020-12-01T19:53:14.000Z | sig-backend/api/routes/beach_routes.py | antonioalfa22/sig-playas-asturias | 3cc087d44a0dc7cdc932adbc9e877a4b53fcff93 | [
"MIT"
] | null | null | null | sig-backend/api/routes/beach_routes.py | antonioalfa22/sig-playas-asturias | 3cc087d44a0dc7cdc932adbc9e877a4b53fcff93 | [
"MIT"
] | null | null | null | from flask_cors import cross_origin
from flask import request, jsonify
from flask import current_app as app
from api.controllers.beach_controller import BeachController
@app.route('/api/beaches', methods=['GET'])
@cross_origin()
def get_all_beaches():
    """Return every beach as a JSON array."""
    beaches = BeachController.get_all_beaches()
    return jsonify(beaches)
@app.route('/api/beaches/<int:id>', methods=['GET'])
@cross_origin()
def get_beach_by_id(id):
    """Return the beach identified by *id* (response built by the controller)."""
    result = BeachController.get_beach_by_id(id)
    return result
@app.route('/api/search', methods=['POST'])
@cross_origin()
def search_beach():
    """Search beaches using the criteria carried in the POST request."""
    matches = BeachController.search_beach(request)
    return jsonify(matches)
| 25.695652 | 60 | 0.766497 | from flask_cors import cross_origin
from flask import request, jsonify
from flask import current_app as app
from api.controllers.beach_controller import BeachController
@app.route('/api/beaches', methods=['GET'])
@cross_origin()
def get_all_beaches():
    """Return every beach as a JSON array."""
    beaches = BeachController.get_all_beaches()
    return jsonify(beaches)
@app.route('/api/beaches/<int:id>', methods=['GET'])
@cross_origin()
def get_beach_by_id(id):
    """Return the beach identified by *id* (response built by the controller)."""
    result = BeachController.get_beach_by_id(id)
    return result
@app.route('/api/search', methods=['POST'])
@cross_origin()
def search_beach():
    """Search beaches using the criteria carried in the POST request."""
    matches = BeachController.search_beach(request)
    return jsonify(matches)
| true | true |
f73b0d4790f474cdb8909c630dc85fc514afe8e3 | 260 | py | Python | evaluateModel.py | rdv0011/egoHands-coreML | c8c8c12606bc63da7538d23b45e76c59896324bc | [
"Apache-2.0"
] | 1 | 2020-07-10T13:39:50.000Z | 2020-07-10T13:39:50.000Z | evaluateModel.py | rdv0011/egoHands-coreML | c8c8c12606bc63da7538d23b45e76c59896324bc | [
"Apache-2.0"
] | null | null | null | evaluateModel.py | rdv0011/egoHands-coreML | c8c8c12606bc63da7538d23b45e76c59896324bc | [
"Apache-2.0"
] | null | null | null | import turicreate as tc
# Load the previously saved Turi Create hand-detection model
# (Darknet-YOLO based, exported under the name 'Hands').
handsModel = tc.load_model('Hands')
# Load the held-out test set and evaluate the model on it;
# `metrics` holds the evaluation results returned by Turi Create.
test_data = tc.SFrame('test.sframe')
metrics = handsModel.evaluate(test_data)
print(metrics) | 32.5 | 59 | 0.792308 | import turicreate as tc
handsModel = tc.load_model('Hands')
test_data = tc.SFrame('test.sframe')
metrics = handsModel.evaluate(test_data)
print(metrics) | true | true |
f73b0f3100f9f71347c305e32138d653a727e52a | 2,734 | py | Python | src/services/scadaApiFetcher.py | nagasudhirpulla/wrldc_scada_mumbai_dashboard | bc107ef47568781b588316f0c5c0c0d2a08adac8 | [
"MIT"
] | null | null | null | src/services/scadaApiFetcher.py | nagasudhirpulla/wrldc_scada_mumbai_dashboard | bc107ef47568781b588316f0c5c0c0d2a08adac8 | [
"MIT"
] | null | null | null | src/services/scadaApiFetcher.py | nagasudhirpulla/wrldc_scada_mumbai_dashboard | bc107ef47568781b588316f0c5c0c0d2a08adac8 | [
"MIT"
] | null | null | null | import requests
import json
import datetime as dt
from typing import Dict, Union, List, Optional
from src.typeDefs.scadaApiDataSample import IScadaApiDataSample
import pandas as pd
import random
class ScadaApiFetcher():
    """Client for a SCADA measurements web API.

    When ``isDummyFetch`` is true no HTTP request is made and synthetic
    random samples are returned instead (useful for offline testing).
    """
    # Class-level defaults; overwritten per instance in __init__.
    apiHost: str = ''
    apiPort: int = 80
    isDummyFetch: bool = False

    def __init__(self, apiHost: str, apiPort: int, isDummyFetch=False):
        self.apiHost = apiHost
        self.apiPort = apiPort
        self.isDummyFetch = isDummyFetch

    def fetchPntHistData(self, pnt: str, startTime: dt.datetime, endTime: dt.datetime, fetchStrategy: str = 'snap', sampleFreq: int = 300) -> List["IScadaApiDataSample"]:
        """Fetch historical samples for point ``pnt`` between two timestamps.

        :param pnt: SCADA point id.
        :param startTime: start of the window (inclusive in dummy mode).
        :param endTime: end of the window (inclusive in dummy mode).
        :param fetchStrategy: server-side sampling strategy (e.g. 'snap').
        :param sampleFreq: sampling period in seconds.
        :return: list of samples; an empty list on any error.
        """
        if self.isDummyFetch:
            # Offline mode: one random sample every sampleFreq seconds.
            if (startTime > endTime) or (sampleFreq == 0):
                return []
            currTime = startTime
            dataRes: List["IScadaApiDataSample"] = []
            while currTime <= endTime:
                dataRes.append(
                    {"timestamp": currTime, "dval": random.randint(1, 100), "status": "GOOD"})
                currTime = currTime + dt.timedelta(seconds=sampleFreq)
            return dataRes
        # The API expects dd/mm/yyyy/HH:MM:SS formatted timestamps.
        startTimeStr = startTime.strftime('%d/%m/%Y/%H:%M:%S')
        endTimeStr = endTime.strftime('%d/%m/%Y/%H:%M:%S')
        params: Dict[str, Union[int, str]] = dict(
            pnt=pnt,
            strtime=startTimeStr,
            endtime=endTimeStr,
            secs=sampleFreq,
            type=fetchStrategy
        )
        try:
            # GET http://host:port/api/values/history?pnt=...&strtime=...&endtime=...&secs=...&type=...
            r = requests.get(
                url="http://{0}:{1}/api/values/history".format(self.apiHost, self.apiPort), params=params)
            resTxt = r.text
            if pd.isna(resTxt) or (resTxt == '') or (resTxt == '[]') or (resTxt == 'null'):
                return []
            data = json.loads(resTxt)
            return data
        except Exception:
            # Narrowed from a bare except: network/JSON failures still yield
            # [], but KeyboardInterrupt/SystemExit are no longer swallowed.
            return []

    def fetchPntRtData(self, pnt) -> Optional[float]:
        """Fetch the current (real-time) value of point ``pnt``.

        :return: the value as float, or None when unavailable.
        """
        if self.isDummyFetch:
            # Offline mode: random value in [1, 99] (randrange upper
            # bound is exclusive).
            return random.randrange(1, 100)
        params = dict(
            pnt=pnt,
        )
        try:
            # GET http://host:port/api/values/real?pnt=...
            r = requests.get(
                url="http://{0}:{1}/api/values/real".format(self.apiHost, self.apiPort), params=params)
            resTxt = r.text
            if pd.isna(resTxt) or (resTxt == 'null') or (resTxt == ''):
                return None
            resData: "IScadaApiDataSample" = json.loads(resTxt)
            return float(resData['dval'])
        except Exception:
            # Narrowed from a bare except (same rationale as above).
            return None
| 39.057143 | 168 | 0.572056 | import requests
import json
import datetime as dt
from typing import Dict, Union, List, Optional
from src.typeDefs.scadaApiDataSample import IScadaApiDataSample
import pandas as pd
import random
class ScadaApiFetcher():
    """Client for a SCADA measurements web API; when ``isDummyFetch`` is
    true, synthetic random samples are returned instead of HTTP calls."""
    # Class-level defaults; overwritten per instance in __init__.
    apiHost: str = ''
    apiPort: int = 80
    isDummyFetch: bool = False
    def __init__(self, apiHost: str, apiPort: int, isDummyFetch=False):
        self.apiHost = apiHost
        self.apiPort = apiPort
        self.isDummyFetch = isDummyFetch
    def fetchPntHistData(self, pnt: str, startTime: dt.datetime, endTime: dt.datetime, fetchStrategy: str = 'snap', sampleFreq: int = 300) -> List[IScadaApiDataSample]:
        """Fetch historical samples for point ``pnt``; empty list on error."""
        if self.isDummyFetch:
            # Offline mode: one random sample every sampleFreq seconds,
            # endpoints inclusive.
            if (startTime > endTime) or (sampleFreq == 0):
                return []
            currTime = startTime
            dataRes: List[IScadaApiDataSample] = []
            while currTime <= endTime:
                dataRes.append(
                    {"timestamp": currTime, "dval": random.randint(1, 100), "status": "GOOD"})
                currTime = currTime + dt.timedelta(seconds=sampleFreq)
            return dataRes
        # The API expects dd/mm/yyyy/HH:MM:SS formatted timestamps.
        startTimeStr = startTime.strftime('%d/%m/%Y/%H:%M:%S')
        endTimeStr = endTime.strftime('%d/%m/%Y/%H:%M:%S')
        params: Dict[str, Union[int, str]] = dict(
            pnt=pnt,
            strtime=startTimeStr,
            endtime=endTimeStr,
            secs=sampleFreq,
            type=fetchStrategy
        )
        try:
            # GET .../api/values/history; empty-ish payloads map to [].
            r = requests.get(
                url="http://{0}:{1}/api/values/history".format(self.apiHost, self.apiPort), params=params)
            resTxt = r.text
            if pd.isna(resTxt) or (resTxt == '') or (resTxt == '[]') or (resTxt == 'null'):
                return []
            data = json.loads(resTxt)
            return data
        except:
            # Best-effort: any failure yields an empty result.
            return []
    def fetchPntRtData(self, pnt) -> Optional[float]:
        """Fetch the current value of point ``pnt``; None when unavailable."""
        if self.isDummyFetch:
            # Offline mode: random value (randrange upper bound exclusive).
            return random.randrange(1, 100)
        params = dict(
            pnt=pnt,
        )
        try:
            # GET .../api/values/real; 'null'/empty payloads map to None.
            r = requests.get(
                url="http://{0}:{1}/api/values/real".format(self.apiHost, self.apiPort), params=params)
            resTxt = r.text
            if pd.isna(resTxt) or (resTxt == 'null') or (resTxt == ''):
                return None
            resData: IScadaApiDataSample = json.loads(resTxt)
            return float(resData['dval'])
        except:
            # Best-effort: any failure yields None.
            return None
| true | true |
f73b10152eedec8dc9da2a71c37cdddeaabec1ad | 974 | py | Python | server/lib/python3.9/site-packages/stripe/api_resources/transfer.py | ejanicas-stripe/hotel | a0d0a7e1ae14b509a5c9d05d17603b99399cb752 | [
"MIT"
] | 1,078 | 2015-01-06T03:35:05.000Z | 2022-03-25T13:25:48.000Z | server/lib/python3.9/site-packages/stripe/api_resources/transfer.py | ejanicas-stripe/hotel | a0d0a7e1ae14b509a5c9d05d17603b99399cb752 | [
"MIT"
] | 558 | 2015-01-07T19:05:02.000Z | 2022-03-28T22:19:24.000Z | server/lib/python3.9/site-packages/stripe/api_resources/transfer.py | ejanicas-stripe/hotel | a0d0a7e1ae14b509a5c9d05d17603b99399cb752 | [
"MIT"
] | 382 | 2015-01-04T14:06:09.000Z | 2022-03-16T04:52:04.000Z | # File generated from our OpenAPI spec
from __future__ import absolute_import, division, print_function
from stripe import util
from stripe.api_resources.abstract import CreateableAPIResource
from stripe.api_resources.abstract import ListableAPIResource
from stripe.api_resources.abstract import UpdateableAPIResource
from stripe.api_resources.abstract import custom_method
from stripe.api_resources.abstract import nested_resource_class_methods
@custom_method("cancel", http_verb="post")
@nested_resource_class_methods(
    "reversal",
    operations=["create", "retrieve", "update", "list"],
)
class Transfer(
    CreateableAPIResource,
    ListableAPIResource,
    UpdateableAPIResource,
):
    OBJECT_NAME = "transfer"

    def cancel(self, idempotency_key=None, **params):
        """Cancel this transfer (POST <instance_url>/cancel) and refresh self."""
        headers = util.populate_headers(idempotency_key)
        endpoint = self.instance_url() + "/cancel"
        response = self.request("post", endpoint, params, headers)
        self.refresh_from(response)
        return self
| 33.586207 | 71 | 0.774127 |
from __future__ import absolute_import, division, print_function
from stripe import util
from stripe.api_resources.abstract import CreateableAPIResource
from stripe.api_resources.abstract import ListableAPIResource
from stripe.api_resources.abstract import UpdateableAPIResource
from stripe.api_resources.abstract import custom_method
from stripe.api_resources.abstract import nested_resource_class_methods
@custom_method("cancel", http_verb="post")
@nested_resource_class_methods(
    "reversal",
    operations=["create", "retrieve", "update", "list"],
)
class Transfer(
    CreateableAPIResource,
    ListableAPIResource,
    UpdateableAPIResource,
):
    OBJECT_NAME = "transfer"

    def cancel(self, idempotency_key=None, **params):
        """Cancel this transfer (POST <instance_url>/cancel) and refresh self."""
        headers = util.populate_headers(idempotency_key)
        endpoint = self.instance_url() + "/cancel"
        response = self.request("post", endpoint, params, headers)
        self.refresh_from(response)
        return self
| true | true |
f73b109eb879d68cf4a557550deb9ff69b20173b | 770 | py | Python | examples/session_hello_world.py | 13751742405/photoshop-python-api | 5fe9b46dd2b2b4e2e1e6ef99a68d68b4fc032a70 | [
"MIT"
] | null | null | null | examples/session_hello_world.py | 13751742405/photoshop-python-api | 5fe9b46dd2b2b4e2e1e6ef99a68d68b4fc032a70 | [
"MIT"
] | null | null | null | examples/session_hello_world.py | 13751742405/photoshop-python-api | 5fe9b46dd2b2b4e2e1e6ef99a68d68b4fc032a70 | [
"MIT"
] | null | null | null | """Add slate information dynamically."""
import os
from tempfile import mkdtemp
from photoshop import Session
with Session() as adobe:
    # New 2000x2000 document; it becomes the active document.
    doc = adobe.app.documents.add(2000, 2000)
    # Pure red fill colour for the text.
    text_color = adobe.SolidColor()
    text_color.rgb.red = 255
    # Add a text layer and set its content, position, size and colour.
    new_text_layer = doc.artLayers.add()
    new_text_layer.kind = adobe.LayerKind.TextLayer
    new_text_layer.textItem.contents = "Hello, World!"
    new_text_layer.textItem.position = [160, 167]
    new_text_layer.textItem.size = 40
    new_text_layer.textItem.color = text_color
    # Export a JPEG copy into a temporary directory and show its path
    # in a Photoshop alert dialog.
    options = adobe.JPEGSaveOptions(quality=1)
    jpg_file = os.path.join(mkdtemp("photoshop-python-api"), "hello_world.jpg")
    doc.saveAs(jpg_file, options, asCopy=True)
    adobe.app.doJavaScript(f'alert("save to jpg: {jpg_file}")')
| 35 | 79 | 0.72987 |
import os
from tempfile import mkdtemp
from photoshop import Session
with Session() as adobe:
    # New 2000x2000 document; it becomes the active document.
    doc = adobe.app.documents.add(2000, 2000)
    # Pure red fill colour for the text.
    text_color = adobe.SolidColor()
    text_color.rgb.red = 255
    # Add a text layer and set its content, position, size and colour.
    new_text_layer = doc.artLayers.add()
    new_text_layer.kind = adobe.LayerKind.TextLayer
    new_text_layer.textItem.contents = "Hello, World!"
    new_text_layer.textItem.position = [160, 167]
    new_text_layer.textItem.size = 40
    new_text_layer.textItem.color = text_color
    # Export a JPEG copy into a temporary directory and show its path
    # in a Photoshop alert dialog.
    options = adobe.JPEGSaveOptions(quality=1)
    jpg_file = os.path.join(mkdtemp("photoshop-python-api"), "hello_world.jpg")
    doc.saveAs(jpg_file, options, asCopy=True)
    adobe.app.doJavaScript(f'alert("save to jpg: {jpg_file}")')
| true | true |
f73b131b5b91a24c4796a66ea31210ff751b8e6a | 436 | py | Python | app/feeds.py | sunxr9/django-blog-scrapy | b881765ecbc16448245b22e1f5caa6288a1fb9b4 | [
"MIT"
] | null | null | null | app/feeds.py | sunxr9/django-blog-scrapy | b881765ecbc16448245b22e1f5caa6288a1fb9b4 | [
"MIT"
] | null | null | null | app/feeds.py | sunxr9/django-blog-scrapy | b881765ecbc16448245b22e1f5caa6288a1fb9b4 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
from django.contrib.syndication.views import Feed
from .models import Article
class AllArticleRssFeed(Feed):
    """RSS feed over blog articles (title means "personal blog")."""
    # Feed title and site link shown to RSS readers.
    title = '个人博客'
    link = '/'
    # Entries to display: the first five articles of the default queryset
    # (presumably newest first -- depends on Article's Meta ordering).
    def items(self):
        return Article.objects.all()[:5]
    # Entry title rendered as "[category] title".
    def item_title(self, item):
        return '[%s] %s' % (item.category, item.title)
    # Entry description: the first 20 characters of the article body.
    def item_description(self, item):
        return item.body[:20]
| 19.818182 | 54 | 0.616972 |
from django.contrib.syndication.views import Feed
from .models import Article
class AllArticleRssFeed(Feed):
    """RSS feed over blog articles (title means "personal blog")."""
    # Feed title and site link shown to RSS readers.
    title = '个人博客'
    link = '/'
    # Entries to display: the first five articles of the default queryset
    # (presumably newest first -- depends on Article's Meta ordering).
    def items(self):
        return Article.objects.all()[:5]
    # Entry title rendered as "[category] title".
    def item_title(self, item):
        return '[%s] %s' % (item.category, item.title)
    # Entry description: the first 20 characters of the article body.
    def item_description(self, item):
        return item.body[:20]
| true | true |
f73b135fa8d38e86b878e4334911df84ffe15d1d | 8,616 | py | Python | RL/soft_actor.py | ksluck/Coadaptation | aa16f277cd31c324a62c832ef2cef94e28d598b8 | [
"MIT"
] | 9 | 2019-11-21T05:21:19.000Z | 2022-02-18T02:16:25.000Z | RL/soft_actor.py | ksluck/Coadaptation | aa16f277cd31c324a62c832ef2cef94e28d598b8 | [
"MIT"
] | 3 | 2020-07-31T13:34:34.000Z | 2021-09-02T07:33:22.000Z | RL/soft_actor.py | ksluck/Coadaptation | aa16f277cd31c324a62c832ef2cef94e28d598b8 | [
"MIT"
] | 5 | 2020-06-04T19:36:59.000Z | 2022-01-04T12:30:10.000Z | from rlkit.torch.sac.policies import TanhGaussianPolicy
# from rlkit.torch.sac.sac import SoftActorCritic
from rlkit.torch.networks import FlattenMlp
import numpy as np
from .rl_algorithm import RL_algorithm
from rlkit.torch.sac.sac import SACTrainer as SoftActorCritic_rlkit
import rlkit.torch.pytorch_util as ptu
import torch
import utils
# networks = {individual:, population:}
class SoftActorCritic(RL_algorithm):
    """Wrapper around rlkit's SAC trainer maintaining two network sets:
    'individual' (per-design) and 'population' (shared)."""
    def __init__(self, config, env, replay, networks):
        """ Basically a wrapper class for SAC from rlkit.

        Args:
            config: Configuration dictionary
            env: Environment
            replay: Replay buffer
            networks: dict containing two sub-dicts, 'individual' and 'population'
                which contain the networks.
        """
        super().__init__(config, env, replay, networks)
        self._variant_pop = config['rl_algorithm_config']['algo_params_pop']
        self._variant_spec = config['rl_algorithm_config']['algo_params']
        # Unpack the individual (per-design) networks.
        self._ind_qf1 = networks['individual']['qf1']
        self._ind_qf2 = networks['individual']['qf2']
        self._ind_qf1_target = networks['individual']['qf1_target']
        self._ind_qf2_target = networks['individual']['qf2_target']
        self._ind_policy = networks['individual']['policy']
        # Unpack the population (shared) networks.
        self._pop_qf1 = networks['population']['qf1']
        self._pop_qf2 = networks['population']['qf2']
        self._pop_qf1_target = networks['population']['qf1_target']
        self._pop_qf2_target = networks['population']['qf2_target']
        self._pop_policy = networks['population']['policy']
        self._batch_size = config['rl_algorithm_config']['batch_size']
        self._nmbr_indiv_updates = config['rl_algorithm_config']['indiv_updates']
        self._nmbr_pop_updates = config['rl_algorithm_config']['pop_updates']
        # One rlkit SAC trainer per network set.
        self._algorithm_ind = SoftActorCritic_rlkit(
            env=self._env,
            policy=self._ind_policy,
            qf1=self._ind_qf1,
            qf2=self._ind_qf2,
            target_qf1=self._ind_qf1_target,
            target_qf2=self._ind_qf2_target,
            use_automatic_entropy_tuning = False,
            **self._variant_spec
        )
        self._algorithm_pop = SoftActorCritic_rlkit(
            env=self._env,
            policy=self._pop_policy,
            qf1=self._pop_qf1,
            qf2=self._pop_qf2,
            target_qf1=self._pop_qf1_target,
            target_qf2=self._pop_qf2_target,
            use_automatic_entropy_tuning = False,
            **self._variant_pop
        )
        # self._algorithm_ind.to(ptu.device)
        # self._algorithm_pop.to(ptu.device)
    def episode_init(self):
        """ Initializations to be done before the first episode.

        Creates a fresh SAC trainer instance for the individual networks
        and, when 'copy_from_gobal' is set in the config (note: the key
        is spelled this way in the config schema), copies the population
        network weights into the individual networks.
        """
        self._algorithm_ind = SoftActorCritic_rlkit(
            env=self._env,
            policy=self._ind_policy,
            qf1=self._ind_qf1,
            qf2=self._ind_qf2,
            target_qf1=self._ind_qf1_target,
            target_qf2=self._ind_qf2_target,
            use_automatic_entropy_tuning = False,
            # alt_alpha = self._alt_alpha,
            **self._variant_spec
        )
        if self._config['rl_algorithm_config']['copy_from_gobal']:
            utils.copy_pop_to_ind(networks_pop=self._networks['population'], networks_ind=self._networks['individual'])
        # We have only to do this because the version of rlkit which we use
        # creates internally a target network
        # vf_dict = self._algorithm_pop.target_vf.state_dict()
        # self._algorithm_ind.target_vf.load_state_dict(vf_dict)
        # self._algorithm_ind.target_vf.eval()
        # self._algorithm_ind.to(ptu.device)
    def single_train_step(self, train_ind=True, train_pop=False):
        """ A single training step.

        Args:
            train_ind: Boolean. If true the individual networks will be trained.
            train_pop: Boolean. If true the population networks will be trained.
        """
        if train_ind:
            # Get only samples from the species buffer
            self._replay.set_mode('species')
            # self._algorithm_ind.num_updates_per_train_call = self._variant_spec['num_updates_per_epoch']
            # self._algorithm_ind._try_to_train()
            for _ in range(self._nmbr_indiv_updates):
                batch = self._replay.random_batch(self._batch_size)
                self._algorithm_ind.train(batch)
        if train_pop:
            # Get only samples from the population buffer
            self._replay.set_mode('population')
            # self._algorithm_pop.num_updates_per_train_call = self._variant_pop['num_updates_per_epoch']
            # self._algorithm_pop._try_to_train()
            for _ in range(self._nmbr_pop_updates):
                batch = self._replay.random_batch(self._batch_size)
                self._algorithm_pop.train(batch)
    @staticmethod
    def create_networks(env, config):
        """ Creates all networks necessary for SAC.

        These networks have to be created before instantiating this class and
        used in the constructor.

        Args:
            config: A configuration dictionary containing population and
                individual networks

        Returns:
            A dictionary which contains the networks.
        """
        network_dict = {
            'individual' : SoftActorCritic._create_networks(env=env, config=config),
            'population' : SoftActorCritic._create_networks(env=env, config=config),
        }
        return network_dict
    @staticmethod
    def _create_networks(env, config):
        """ Creates one set of Q/target/policy networks for SAC.

        These networks have to be created before instantiating this class and
        used in the constructor.

        Args:
            config: A configuration dictionary.

        Returns:
            A dictionary which contains the networks.
        """
        obs_dim = int(np.prod(env.observation_space.shape))
        action_dim = int(np.prod(env.action_space.shape))
        net_size = config['rl_algorithm_config']['net_size']
        hidden_sizes = [net_size] * config['rl_algorithm_config']['network_depth']
        # hidden_sizes = [net_size, net_size, net_size]
        qf1 = FlattenMlp(
            hidden_sizes=hidden_sizes,
            input_size=obs_dim + action_dim,
            output_size=1,
        ).to(device=ptu.device)
        qf2 = FlattenMlp(
            hidden_sizes=hidden_sizes,
            input_size=obs_dim + action_dim,
            output_size=1,
        ).to(device=ptu.device)
        qf1_target = FlattenMlp(
            hidden_sizes=hidden_sizes,
            input_size=obs_dim + action_dim,
            output_size=1,
        ).to(device=ptu.device)
        qf2_target = FlattenMlp(
            hidden_sizes=hidden_sizes,
            input_size=obs_dim + action_dim,
            output_size=1,
        ).to(device=ptu.device)
        policy = TanhGaussianPolicy(
            hidden_sizes=hidden_sizes,
            obs_dim=obs_dim,
            action_dim=action_dim,
        ).to(device=ptu.device)
        # Clip gradients element-wise on Q networks and the policy.
        clip_value = 1.0
        for p in qf1.parameters():
            p.register_hook(lambda grad: torch.clamp(grad, -clip_value, clip_value))
        for p in qf2.parameters():
            p.register_hook(lambda grad: torch.clamp(grad, -clip_value, clip_value))
        for p in policy.parameters():
            p.register_hook(lambda grad: torch.clamp(grad, -clip_value, clip_value))
        return {'qf1' : qf1, 'qf2' : qf2, 'qf1_target' : qf1_target, 'qf2_target' : qf2_target, 'policy' : policy}
    @staticmethod
    def get_q_network(networks):
        """ Returns the q network from a dict of networks.

        This method extracts the q-network from the dictionary of networks
        created by the function create_networks.

        Args:
            networks: Dict containing the networks.

        Returns:
            The q-network as torch object.
        """
        return networks['qf1']
    @staticmethod
    def get_policy_network(networks):
        """ Returns the policy network from a dict of networks.

        This method extracts the policy network from the dictionary of networks
        created by the function create_networks.

        Args:
            networks: Dict containing the networks.

        Returns:
            The policy network as torch object.
        """
        return networks['policy']
| 37.955947 | 119 | 0.638812 | from rlkit.torch.sac.policies import TanhGaussianPolicy
from rlkit.torch.networks import FlattenMlp
import numpy as np
from .rl_algorithm import RL_algorithm
from rlkit.torch.sac.sac import SACTrainer as SoftActorCritic_rlkit
import rlkit.torch.pytorch_util as ptu
import torch
import utils
class SoftActorCritic(RL_algorithm):
    """Soft Actor-Critic wrapper maintaining two rlkit SAC trainers.

    One trainer optimises the *individual* networks and one the *population*
    networks; both read batches from the replay buffer held by the base class.
    NOTE(review): this is a comment-stripped copy of the module — docstrings
    below are reconstructed from the visible code only.
    """
    def __init__(self, config, env, replay, networks):
        """Build both rlkit SAC trainers from the supplied network dicts.

        config   -- experiment dict; hyper-parameters come from
                    config['rl_algorithm_config'].
        env      -- environment handed through to the rlkit trainers.
        replay   -- replay buffer (stored by the RL_algorithm base class).
        networks -- dict with 'individual' and 'population' sub-dicts, each
                    holding 'qf1', 'qf2', 'qf1_target', 'qf2_target', 'policy'.
        """
        super().__init__(config, env, replay, networks)
        # Separate rlkit hyper-parameter sets for population vs. individual.
        self._variant_pop = config['rl_algorithm_config']['algo_params_pop']
        self._variant_spec = config['rl_algorithm_config']['algo_params']
        # Individual (per-agent) networks.
        self._ind_qf1 = networks['individual']['qf1']
        self._ind_qf2 = networks['individual']['qf2']
        self._ind_qf1_target = networks['individual']['qf1_target']
        self._ind_qf2_target = networks['individual']['qf2_target']
        self._ind_policy = networks['individual']['policy']
        # Population-level networks.
        self._pop_qf1 = networks['population']['qf1']
        self._pop_qf2 = networks['population']['qf2']
        self._pop_qf1_target = networks['population']['qf1_target']
        self._pop_qf2_target = networks['population']['qf2_target']
        self._pop_policy = networks['population']['policy']
        self._batch_size = config['rl_algorithm_config']['batch_size']
        self._nmbr_indiv_updates = config['rl_algorithm_config']['indiv_updates']
        self._nmbr_pop_updates = config['rl_algorithm_config']['pop_updates']
        self._algorithm_ind = SoftActorCritic_rlkit(
            env=self._env,
            policy=self._ind_policy,
            qf1=self._ind_qf1,
            qf2=self._ind_qf2,
            target_qf1=self._ind_qf1_target,
            target_qf2=self._ind_qf2_target,
            use_automatic_entropy_tuning = False,
            **self._variant_spec
        )
        self._algorithm_pop = SoftActorCritic_rlkit(
            env=self._env,
            policy=self._pop_policy,
            qf1=self._pop_qf1,
            qf2=self._pop_qf2,
            target_qf1=self._pop_qf1_target,
            target_qf2=self._pop_qf2_target,
            use_automatic_entropy_tuning = False,
            **self._variant_pop
        )
    def episode_init(self):
        """Reset the individual trainer at episode start.

        Rebuilds the individual SAC trainer (resetting its optimiser state)
        and optionally copies the population weights into the individual
        networks.  Note: the config key 'copy_from_gobal' is spelled with the
        typo used elsewhere in the project — do not "fix" it here alone.
        """
        self._algorithm_ind = SoftActorCritic_rlkit(
            env=self._env,
            policy=self._ind_policy,
            qf1=self._ind_qf1,
            qf2=self._ind_qf2,
            target_qf1=self._ind_qf1_target,
            target_qf2=self._ind_qf2_target,
            use_automatic_entropy_tuning = False,
            **self._variant_spec
        )
        if self._config['rl_algorithm_config']['copy_from_gobal']:
            utils.copy_pop_to_ind(networks_pop=self._networks['population'], networks_ind=self._networks['individual'])
    def single_train_step(self, train_ind=True, train_pop=False):
        """Run gradient updates on random replay batches.

        train_ind -- update the individual trainer on 'species'-mode batches.
        train_pop -- update the population trainer on 'population'-mode batches.
        """
        if train_ind:
            self._replay.set_mode('species')
            for _ in range(self._nmbr_indiv_updates):
                batch = self._replay.random_batch(self._batch_size)
                self._algorithm_ind.train(batch)
        if train_pop:
            self._replay.set_mode('population')
            for _ in range(self._nmbr_pop_updates):
                batch = self._replay.random_batch(self._batch_size)
                self._algorithm_pop.train(batch)
    @staticmethod
    def create_networks(env, config):
        """Create independent 'individual' and 'population' network sets."""
        network_dict = {
            'individual' : SoftActorCritic._create_networks(env=env, config=config),
            'population' : SoftActorCritic._create_networks(env=env, config=config),
        }
        return network_dict
    @staticmethod
    def _create_networks(env, config):
        """Build one set of SAC networks (two Q-functions, their targets, policy).

        Network widths/depths come from config['rl_algorithm_config'];
        gradient-clipping hooks (clamp to +/-1.0) are registered on the live
        Q-functions and the policy (not on the target networks).
        """
        obs_dim = int(np.prod(env.observation_space.shape))
        action_dim = int(np.prod(env.action_space.shape))
        net_size = config['rl_algorithm_config']['net_size']
        hidden_sizes = [net_size] * config['rl_algorithm_config']['network_depth']
        qf1 = FlattenMlp(
            hidden_sizes=hidden_sizes,
            input_size=obs_dim + action_dim,
            output_size=1,
        ).to(device=ptu.device)
        qf2 = FlattenMlp(
            hidden_sizes=hidden_sizes,
            input_size=obs_dim + action_dim,
            output_size=1,
        ).to(device=ptu.device)
        qf1_target = FlattenMlp(
            hidden_sizes=hidden_sizes,
            input_size=obs_dim + action_dim,
            output_size=1,
        ).to(device=ptu.device)
        qf2_target = FlattenMlp(
            hidden_sizes=hidden_sizes,
            input_size=obs_dim + action_dim,
            output_size=1,
        ).to(device=ptu.device)
        policy = TanhGaussianPolicy(
            hidden_sizes=hidden_sizes,
            obs_dim=obs_dim,
            action_dim=action_dim,
        ).to(device=ptu.device)
        # Clamp every gradient element to [-1, 1] during backprop.
        clip_value = 1.0
        for p in qf1.parameters():
            p.register_hook(lambda grad: torch.clamp(grad, -clip_value, clip_value))
        for p in qf2.parameters():
            p.register_hook(lambda grad: torch.clamp(grad, -clip_value, clip_value))
        for p in policy.parameters():
            p.register_hook(lambda grad: torch.clamp(grad, -clip_value, clip_value))
        return {'qf1' : qf1, 'qf2' : qf2, 'qf1_target' : qf1_target, 'qf2_target' : qf2_target, 'policy' : policy}
    @staticmethod
    def get_q_network(networks):
        """Return the first Q-network from a network dict."""
        return networks['qf1']
    @staticmethod
    def get_policy_network(networks):
        """Return the policy network from a network dict."""
        return networks['policy']
| true | true |
f73b13c9474f958b46c5b34dc9999dcabc90129f | 6,066 | py | Python | bin/Player.py | nic-olo/Pac-Man | 381b5fef8c674d06f649b63be5619cb273a74064 | [
"MIT"
] | null | null | null | bin/Player.py | nic-olo/Pac-Man | 381b5fef8c674d06f649b63be5619cb273a74064 | [
"MIT"
] | null | null | null | bin/Player.py | nic-olo/Pac-Man | 381b5fef8c674d06f649b63be5619cb273a74064 | [
"MIT"
] | null | null | null | from settings import *
from MazeRender import coin_collision
from time import time
class Player:
    """Pac-Man player: creation, wall collision and frame-rate-independent movement.

    Reads game state from `self.app` (canvas, walls, direction flags); geometry
    constants (CELL_WIDTH, GRID_START_X, ...) come from `settings`.
    """
    def __init__(self, app):
        """initialize the player"""
        self.app = app
        self.player_color = self.app.settings['color']
        self.player_speed = PLAYER_SPEED
        self.make_player()
        # timestamp of the previous frame, used for delta-time movement
        self.prev_time = time()

    def make_player(self):
        """create the player"""
        if self.app.state == 'start' or self.app.state == 'resume':
            with open(MAZE_COORDINATES_PATH, 'r') as file:
                for i in range(GRID_ROWS):
                    # read a file txt to get the coordinates of the player
                    # (cell value '5' marks the spawn point)
                    line = file.readline()
                    for j in range(GRID_COLUMNS):
                        if line[j] == '5':
                            player_pos = [j, i]
                            # NOTE(review): break only leaves the inner loop;
                            # the remaining rows are still scanned — relies on
                            # a single '5' in the maze file. TODO confirm.
                            break
            self.player = self.app.canvas.create_oval(
                GRID_START_X + CELL_WIDTH * player_pos[0] + PLAYER_X1,
                GRID_START_Y + CELL_HEIGHT * player_pos[1] + PLAYER_Y1,
                GRID_START_X + CELL_WIDTH * (player_pos[0]) + PLAYER_X2,
                GRID_START_Y + CELL_HEIGHT * (player_pos[1]) + PLAYER_Y2,
                fill=self.player_color
            )
        else:
            # restore the oval at the previously saved coordinates
            self.player = self.app.canvas.create_oval(
                self.app.player_coords,
                fill=self.player_color
            )
            self.app.state = 'start'

    def can_move(self):
        """block the player from passing through a wall

        For the current direction, cancels movement (direction -> None) when
        the leading edge of the player is within 5 px of a wall face that
        overlaps the player on the perpendicular axis.
        """
        player_coords = self.app.canvas.coords(self.player)
        for wall in self.app.walls:
            # wall is (grid_x, grid_y, canvas_id) — presumably; verify against
            # the wall-creation code.
            wall_coords = self.app.canvas.coords(wall[2])
            if self.app.player_direction == 'left' and \
                    abs(
                        player_coords[0] - (GRID_START_X + CELL_WIDTH *
                                            (wall[0] + 1))) < 5 and \
                    wall_coords[1] < player_coords[3] and \
                    wall_coords[3] > player_coords[1]:
                self.app.player_direction = None
            elif self.app.player_direction == 'right' and abs(
                    player_coords[2] - (
                        GRID_START_X + CELL_WIDTH * wall[0])) < 5 and \
                    wall_coords[1] < player_coords[3] and wall_coords[3] > \
                    player_coords[1]:
                self.app.player_direction = None
            elif self.app.player_direction == 'up' and abs(
                    player_coords[1] - (GRID_START_Y + CELL_HEIGHT * (
                        wall[1] + 1))) < 5 and \
                    wall_coords[0] < player_coords[2] and wall_coords[2] > \
                    player_coords[0]:
                self.app.player_direction = None
            elif self.app.player_direction == 'down' and abs(
                    player_coords[3] - (
                        GRID_START_Y + CELL_HEIGHT * wall[1])) < 5 and \
                    wall_coords[0] < player_coords[2] and wall_coords[2] > \
                    player_coords[0]:
                self.app.player_direction = None

    def move_player(self):
        """check the position of the player and move it"""
        def move_grid():
            """move the grid that surrounds the user

            Snaps the tracking rectangle to the nearest cell; the +4/+6
            offsets are pixel fudge factors for the canvas border.
            """
            if self.app.state == 'start' or self.app.state == 'resume':
                self.app.canvas.coords(
                    self.app.grid,
                    ((positions[0] + CELL_WIDTH // 2) //
                     CELL_WIDTH) * CELL_WIDTH + 4,
                    ((positions[1] + CELL_HEIGHT // 2) //
                     CELL_HEIGHT) * CELL_HEIGHT + 6,
                    ((positions[2] + CELL_WIDTH // 2) //
                     CELL_WIDTH) * CELL_WIDTH + 4,
                    ((positions[3] + CELL_HEIGHT // 2) //
                     CELL_HEIGHT) * CELL_HEIGHT + 6
                )
                self.app.player_coords = self.app.canvas.coords(self.app.grid)

        def in_grid():
            """check if the player is in the grid before moving the player

            Blocks a perpendicular turn while the player is between cells
            (offset in the 3..15 px band) by reverting to the previous
            direction, so turns only happen roughly on cell boundaries.
            """
            if self.app.state == 'start' or self.app.state == 'resume':
                if self.app.player_direction == 'up' or \
                        self.app.player_direction == 'down':
                    offset = abs(
                        ((positions[0] - GRID_START_X) %
                         CELL_WIDTH) - CELL_WIDTH
                    )
                    if 3 < offset < 15:
                        self.app.player_direction = self.app.prev_direction
                if self.app.player_direction == 'left' or \
                        self.app.player_direction == 'right':
                    offset = abs(
                        ((positions[1] - GRID_START_Y) % CELL_HEIGHT) -
                        CELL_HEIGHT
                    )
                    if 3 < offset < 15:
                        self.app.player_direction = self.app.prev_direction

        self.app.canvas.pack()
        positions = self.app.canvas.coords(self.player)
        in_grid()
        coin_collision(self.app)
        self.can_move()
        # scale the per-frame displacement by elapsed wall-clock time so the
        # speed is independent of the frame rate
        now = time()
        delta_time = now - self.prev_time
        self.player_weighted_speed = self.player_speed * delta_time
        self.prev_time = now
        if self.app.player_direction == 'left':
            self.app.canvas.move(self.player, -self.player_weighted_speed, 0)
        elif self.app.player_direction == 'right':
            self.app.canvas.move(self.player, self.player_weighted_speed, 0)
        elif self.app.player_direction == 'up':
            self.app.canvas.move(self.player, 0, -self.player_weighted_speed)
        elif self.app.player_direction == 'down':
            self.app.canvas.move(self.player, 0, self.player_weighted_speed)
        self.app.prev_direction = self.app.player_direction
        move_grid()

    def update(self):
        """update the movement of the player

        Re-schedules itself on the Tk event loop every DELAY ms while the
        game is not paused.
        """
        self.move_player()
        if not self.app.is_pause:
            self.app.window.after(DELAY, self.update)
| 38.636943 | 78 | 0.509397 | from settings import *
from MazeRender import coin_collision
from time import time
class Player:
def __init__(self, app):
self.app = app
self.player_color = self.app.settings['color']
self.player_speed = PLAYER_SPEED
self.make_player()
self.prev_time = time()
def make_player(self):
if self.app.state == 'start' or self.app.state == 'resume':
with open(MAZE_COORDINATES_PATH, 'r') as file:
for i in range(GRID_ROWS):
line = file.readline()
for j in range(GRID_COLUMNS):
if line[j] == '5':
player_pos = [j, i]
break
self.player = self.app.canvas.create_oval(
GRID_START_X + CELL_WIDTH * player_pos[0] + PLAYER_X1,
GRID_START_Y + CELL_HEIGHT * player_pos[1] + PLAYER_Y1,
GRID_START_X + CELL_WIDTH * (player_pos[0]) + PLAYER_X2,
GRID_START_Y + CELL_HEIGHT * (player_pos[1]) + PLAYER_Y2,
fill=self.player_color
)
else:
self.player = self.app.canvas.create_oval(
self.app.player_coords,
fill=self.player_color
)
self.app.state = 'start'
def can_move(self):
player_coords = self.app.canvas.coords(self.player)
for wall in self.app.walls:
wall_coords = self.app.canvas.coords(wall[2])
if self.app.player_direction == 'left' and \
abs(
player_coords[0] - (GRID_START_X + CELL_WIDTH *
(wall[0] + 1))) < 5 and \
wall_coords[1] < player_coords[3] and \
wall_coords[3] > player_coords[1]:
self.app.player_direction = None
elif self.app.player_direction == 'right' and abs(
player_coords[2] - (
GRID_START_X + CELL_WIDTH * wall[0])) < 5 and \
wall_coords[1] < player_coords[3] and wall_coords[3] > \
player_coords[1]:
self.app.player_direction = None
elif self.app.player_direction == 'up' and abs(
player_coords[1] - (GRID_START_Y + CELL_HEIGHT * (
wall[1] + 1))) < 5 and \
wall_coords[0] < player_coords[2] and wall_coords[2] > \
player_coords[0]:
self.app.player_direction = None
elif self.app.player_direction == 'down' and abs(
player_coords[3] - (
GRID_START_Y + CELL_HEIGHT * wall[1])) < 5 and \
wall_coords[0] < player_coords[2] and wall_coords[2] > \
player_coords[0]:
self.app.player_direction = None
def move_player(self):
def move_grid():
if self.app.state == 'start' or self.app.state == 'resume':
self.app.canvas.coords(
self.app.grid,
((positions[0] + CELL_WIDTH // 2) //
CELL_WIDTH) * CELL_WIDTH + 4,
((positions[1] + CELL_HEIGHT // 2) //
CELL_HEIGHT) * CELL_HEIGHT + 6,
((positions[2] + CELL_WIDTH // 2) //
CELL_WIDTH) * CELL_WIDTH + 4,
((positions[3] + CELL_HEIGHT // 2) //
CELL_HEIGHT) * CELL_HEIGHT + 6
)
self.app.player_coords = self.app.canvas.coords(self.app.grid)
def in_grid():
if self.app.state == 'start' or self.app.state == 'resume':
if self.app.player_direction == 'up' or \
self.app.player_direction == 'down':
offset = abs(
((positions[0] - GRID_START_X) %
CELL_WIDTH) - CELL_WIDTH
)
if 3 < offset < 15:
self.app.player_direction = self.app.prev_direction
if self.app.player_direction == 'left' or \
self.app.player_direction == 'right':
offset = abs(
((positions[1] - GRID_START_Y) % CELL_HEIGHT) -
CELL_HEIGHT
)
if 3 < offset < 15:
self.app.player_direction = self.app.prev_direction
self.app.canvas.pack()
positions = self.app.canvas.coords(self.player)
in_grid()
coin_collision(self.app)
self.can_move()
now = time()
delta_time = now - self.prev_time
self.player_weighted_speed = self.player_speed * delta_time
self.prev_time = now
if self.app.player_direction == 'left':
self.app.canvas.move(self.player, -self.player_weighted_speed, 0)
elif self.app.player_direction == 'right':
self.app.canvas.move(self.player, self.player_weighted_speed, 0)
elif self.app.player_direction == 'up':
self.app.canvas.move(self.player, 0, -self.player_weighted_speed)
elif self.app.player_direction == 'down':
self.app.canvas.move(self.player, 0, self.player_weighted_speed)
self.app.prev_direction = self.app.player_direction
move_grid()
def update(self):
self.move_player()
if not self.app.is_pause:
self.app.window.after(DELAY, self.update)
| true | true |
f73b14df601281b19ccb257989a917da8b3ff5b2 | 2,677 | py | Python | Currency Convertor - GUI based/Currency_convertor_GUI.py | avinashkranjan/PraticalPythonProjects | 12c1f7cedae57a843ceb6aba68cca48df505f341 | [
"MIT"
] | 930 | 2020-09-05T22:07:28.000Z | 2022-03-30T07:56:18.000Z | Currency Convertor - GUI based/Currency_convertor_GUI.py | maheshdbabar9340/Amazing-Python-Scripts | e2272048cbe49b4bda5072bbdd8479739bb6c18d | [
"MIT"
] | 893 | 2020-09-04T07:57:24.000Z | 2022-02-08T02:12:26.000Z | Currency Convertor - GUI based/Currency_convertor_GUI.py | maheshdbabar9340/Amazing-Python-Scripts | e2272048cbe49b4bda5072bbdd8479739bb6c18d | [
"MIT"
] | 497 | 2020-09-05T08:16:24.000Z | 2022-03-31T00:55:57.000Z | import requests
import tkinter as tk
from tkinter import ttk
def calculate_conversion():
    """Convert the amount entered in the GUI between the selected currencies.

    Reads the module-level widgets ``from_amount``, ``from_currency_code`` and
    ``to_currency_code``, fetches current INR-based exchange rates, and writes
    the converted amount into the ``to_amount`` label.

    Robustness fixes over the original: a request timeout is set, network
    failures and non-numeric input are reported in the label instead of
    raising an uncaught exception inside the Tk callback.
    """
    # URL of respective API (rates are quoted against INR)
    url = "https://api.exchangerate-api.com/v4/latest/INR"
    # Receive Data from API
    try:
        data = requests.get(url, timeout=10).json()
    except requests.RequestException:
        to_amount.config(text="network error")
        return
    currency_rates = data['rates']
    # get From amount from GUI; the Entry may contain non-numeric text
    try:
        amount = float(from_amount.get())
    except ValueError:
        to_amount.config(text="invalid amount")
        return
    # Get country codes from GUI
    fc = from_currency_code.get()
    tc = to_currency_code.get()
    # Normalise the amount to INR first (if source is not already INR) ...
    if fc != 'INR':
        amount = amount / currency_rates[fc]
    # ... then convert INR to the target currency
    amount = amount * currency_rates[tc]
    amount = round(amount, 2)
    # Set amount to Label in GUI
    to_amount.config(text=str(amount))
if __name__ == '__main__':
    # Fetch the available currency codes once so the dropdowns can be filled.
    url = "https://api.exchangerate-api.com/v4/latest/INR"
    data = requests.get(url).json()
    currency_rates = data['rates']
    # Build the GUI: fixed-size dark window
    screen = tk.Tk()
    screen.title("Currency convertor")
    screen.geometry("500x300")
    screen.config(bg="#282828")
    # Introduction label
    main_label = tk.Label(screen, text=" Welcome to Currency Convertor ")
    main_label.config(font=("Lato", 15, "bold"),
                      anchor="center", bg='#3500D3', fg='white')
    main_label.place(x=70, y=10)
    # Input field for the amount to convert
    from_amount = tk.Entry(screen, justify=tk.CENTER)
    from_amount.place(x=58, y=180)
    # Label that displays the converted amount
    to_amount = tk.Label(screen, anchor="center", bg='white',
                         fg='black', width=16, font=("Lato", 12))
    to_amount.place(x=300, y=180)
    # Dropdown variables, defaulting both sides to INR
    from_currency_code = tk.StringVar(screen)
    from_currency_code.set("INR")
    to_currency_code = tk.StringVar(screen)
    to_currency_code.set("INR")
    # Source-currency dropdown (read-only so only valid codes can be picked)
    from_currency_menu = ttk.Combobox(screen, textvariable=from_currency_code, values=list(
        currency_rates.keys()), font=("Lato", 12), state='readonly', width=14, justify=tk.CENTER)
    from_currency_menu.place(x=61, y=110)
    # Target-currency dropdown
    to_currency_menu = ttk.Combobox(screen, textvariable=to_currency_code, values=list(
        currency_rates.keys()), font=("Lato", 12), state='readonly', width=14, justify=tk.CENTER)
    to_currency_menu.place(x=303, y=110)
    # Convert button wired to calculate_conversion
    convert_btn = tk.Button(
        screen, text="Convert", fg='white', bg="#3500D3", command=calculate_conversion)
    convert_btn.place(x=230, y=240)
    screen.mainloop()
| 31.869048 | 97 | 0.668659 | import requests
import tkinter as tk
from tkinter import ttk
def calculate_conversion():
url = "https://api.exchangerate-api.com/v4/latest/INR"
data = requests.get(url).json()
currency_rates = data['rates']
amount = float(from_amount.get())
fc = from_currency_code.get()
tc = to_currency_code.get()
if fc != 'INR':
amount = amount/currency_rates[fc]
amount = amount * currency_rates[tc]
amount = round(amount, 2)
to_amount.config(text=str(amount))
if __name__ == '__main__':
url = "https://api.exchangerate-api.com/v4/latest/INR"
data = requests.get(url).json()
currency_rates = data['rates']
screen = tk.Tk()
screen.title("Currency convertor")
screen.geometry("500x300")
screen.config(bg="#282828")
main_label = tk.Label(screen, text=" Welcome to Currency Convertor ")
main_label.config(font=("Lato", 15, "bold"),
anchor="center", bg='#3500D3', fg='white')
main_label.place(x=70, y=10)
from_amount = tk.Entry(screen, justify=tk.CENTER)
from_amount.place(x=58, y=180)
to_amount = tk.Label(screen, anchor="center", bg='white',
fg='black', width=16, font=("Lato", 12))
to_amount.place(x=300, y=180)
# Variable declation for dropdown menu and set default values
from_currency_code = tk.StringVar(screen)
from_currency_code.set("INR")
to_currency_code = tk.StringVar(screen)
to_currency_code.set("INR")
# dropdown menu for from_currency and it's placing
from_currency_menu = ttk.Combobox(screen, textvariable=from_currency_code, values=list(
currency_rates.keys()), font=("Lato", 12), state='readonly', width=14, justify=tk.CENTER)
from_currency_menu.place(x=61, y=110)
to_currency_menu = ttk.Combobox(screen, textvariable=to_currency_code, values=list(
currency_rates.keys()), font=("Lato", 12), state='readonly', width=14, justify=tk.CENTER)
to_currency_menu.place(x=303, y=110)
# Convert button and placing
convert_btn = tk.Button(
screen, text="Convert", fg='white', bg="#3500D3", command=calculate_conversion)
convert_btn.place(x=230, y=240)
screen.mainloop()
| true | true |
f73b14f39ab08ab4d7861586f6d0ef7f537d17f9 | 85 | py | Python | pydip/player/__init__.py | leo26k/pydip | dd849ed8c74d87eae6a509bc6357bc7ffd296426 | [
"MIT"
] | 13 | 2018-04-04T02:48:59.000Z | 2020-09-13T09:38:24.000Z | pydip/player/__init__.py | leo26k/pydip | dd849ed8c74d87eae6a509bc6357bc7ffd296426 | [
"MIT"
] | 6 | 2018-06-24T19:52:37.000Z | 2021-02-11T20:13:58.000Z | pydip/player/__init__.py | leo26k/pydip | dd849ed8c74d87eae6a509bc6357bc7ffd296426 | [
"MIT"
] | 5 | 2018-02-28T21:03:46.000Z | 2022-03-25T15:58:27.000Z | from pydip.player.player import Player
from pydip.player.unit import UnitTypes, Unit
| 28.333333 | 45 | 0.835294 | from pydip.player.player import Player
from pydip.player.unit import UnitTypes, Unit
| true | true |
f73b15a9ae3a0bc96541c2cb745590141feadb87 | 550 | py | Python | MetioTube/profiles/migrations/0003_profile_subscribers.py | Sheko1/MetioTube | c1c36d00ea46fc37cc7f3c0c9c0cae6e89b2113c | [
"MIT"
] | null | null | null | MetioTube/profiles/migrations/0003_profile_subscribers.py | Sheko1/MetioTube | c1c36d00ea46fc37cc7f3c0c9c0cae6e89b2113c | [
"MIT"
] | null | null | null | MetioTube/profiles/migrations/0003_profile_subscribers.py | Sheko1/MetioTube | c1c36d00ea46fc37cc7f3c0c9c0cae6e89b2113c | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-08-02 10:10
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('profiles', '0002_alter_profile_profile_picture'),
]
operations = [
migrations.AddField(
model_name='profile',
name='subscribers',
field=models.ManyToManyField(related_name='subscribers', to=settings.AUTH_USER_MODEL),
),
]
| 26.190476 | 98 | 0.672727 |
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('profiles', '0002_alter_profile_profile_picture'),
]
operations = [
migrations.AddField(
model_name='profile',
name='subscribers',
field=models.ManyToManyField(related_name='subscribers', to=settings.AUTH_USER_MODEL),
),
]
| true | true |
f73b16b6fb215b937be73f7106ede2ed01c01058 | 4,053 | py | Python | sgtpy/vrmie_mixtures/density_solver.py | MatKie/SGTPy | 8e98d92fedd2b07d834e547e5154ec8f70d80728 | [
"MIT"
] | 12 | 2020-12-27T17:04:33.000Z | 2021-07-19T06:28:28.000Z | sgtpy/vrmie_mixtures/density_solver.py | MatKie/SGTPy | 8e98d92fedd2b07d834e547e5154ec8f70d80728 | [
"MIT"
] | 2 | 2021-05-15T14:27:57.000Z | 2021-08-19T15:42:24.000Z | sgtpy/vrmie_mixtures/density_solver.py | MatKie/SGTPy | 8e98d92fedd2b07d834e547e5154ec8f70d80728 | [
"MIT"
] | 5 | 2021-02-21T01:33:29.000Z | 2021-07-26T15:11:08.000Z | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize import minimize_scalar, brentq
from ..constants import Na
def dPsaft_fun(rho, x, temp_aux, saft):
    """Pressure derivative dP/drho at molar density ``rho``.

    Reads and updates the module-level association-fraction cache ``Xass``
    (shared with ``density_topliss``), so the global must be initialised
    before this function is used as a SciPy objective.
    """
    global Xass
    rho_m = Na * rho
    derivatives, Xass = saft.d2afcn_drho_aux(x, rho_m, temp_aux, Xass)
    _, da_drho, d2a_drho2 = derivatives
    dPsaft = 2 * rho_m * da_drho + rho_m**2 * d2a_drho2
    return dPsaft
def Psaft_obj(rho, x, temp_aux, saft, Pspec):
    """Residual P(rho) - Pspec for root finding on the equation of state.

    Like ``dPsaft_fun``, relies on the module-level ``Xass`` cache being set
    by the caller (``density_topliss``) before use.
    """
    global Xass
    rho_m = Na * rho
    derivatives, Xass = saft.dafcn_drho_aux(x, rho_m, temp_aux, Xass)
    _, da_drho = derivatives
    pressure = rho_m**2 * da_drho / Na
    return pressure - Pspec
def density_newton_lim(rho_a, rho_b, x, temp_aux, P, Xass0, saft):
    """Bracketed Newton solve for the density at pressure ``P``.

    Starts at the bracket midpoint and takes Newton steps, falling back to
    bisection whenever a step would leave the (continuously tightened)
    bracket [rho_a, rho_b].  Runs at most 15 iterations or until the update
    is below 1e-6.  Returns (rho, Xass).
    """
    guess = (rho_a + rho_b) / 2
    press, dpress, Xass = saft.dP_drho_aux(x, guess, temp_aux, Xass0)
    iteration = 0
    while iteration < 15:
        iteration += 1
        previous = guess
        residual = press - P
        # tighten the bracket around the root using the sign of the residual
        if residual > 0:
            rho_b = guess
        else:
            rho_a = guess
        newton_step = guess - residual / dpress
        # accept the Newton step only if it stays strictly inside the bracket
        if rho_a < newton_step < rho_b:
            guess = newton_step
        else:
            guess = (rho_a + rho_b) / 2
        if np.abs(guess - previous) < 1e-6:
            break
        press, dpress, Xass = saft.dP_drho_aux(x, guess, temp_aux, Xass)
    return guess, Xass
def density_topliss(state, x, temp_aux, P, Xass0, saft):
    """Topliss-style density solve for a liquid ('L') or vapor ('V') root.

    Stages: (0) build density bounds, (1) locate the inflexion of dP/drho,
    (2) locate the spinodal-like extremum with brentq and shrink the bracket
    to the requested phase, then finish with the bracketed Newton solver.
    Returns (rho, Xass); rho is NaN when no valid root exists for ``state``.
    """
    if state != 'L' and state != 'V':
        # NOTE(review): raising Warning (not ValueError) is unusual but kept
        # because callers may catch it.
        raise Warning("Not valid state. 'L' for liquid and 'V' for vapor.")
    beta = temp_aux[0]
    # lower boundary at (near) zero density; ideal-gas slope dP/drho = Na/beta
    rho_lb = 1e-5
    dP_lb = Na / beta
    # Upper boundary limit from the close-packing fraction (eta_max = 0.7405)
    etamax = 0.7405
    rho_lim = (6 * etamax) / np.dot(x, (saft.ms * np.pi * saft.sigma**3)) / Na
    ub_sucess = False
    rho_ub = 0.4 * rho_lim
    it = 0
    P_ub, dP_ub, Xass_ub = saft.dP_drho_aux(x, rho_ub, temp_aux, Xass0)
    # push the upper bound up (max 5 tries) until P_ub exceeds the target
    # pressure on a mechanically stable branch (dP/drho > 0)
    while not ub_sucess and it < 5:
        it += 1
        P_ub, dP_ub, Xass_ub = saft.dP_drho_aux(x, rho_ub, temp_aux, Xass_ub)
        rho_ub += 0.15 * rho_lim
        ub_sucess = P_ub > P and dP_ub > 0
    # Finite-difference curvature of P near zero density decides whether an
    # inflexion search is needed (flag 1) or the isotherm is monotone (flag 3)
    rho_lb1 = 1e-4 * rho_lim
    P_lb1, dP_lb1, Xass_lb = saft.dP_drho_aux(x, rho_lb1, temp_aux, Xass0)
    d2P_lb1 = (dP_lb1 - dP_lb) / rho_lb1
    if d2P_lb1 > 0:
        flag = 3
    else:
        flag = 1
    # dPsaft_fun/Psaft_obj read this module-level cache; seed it here
    global Xass
    Xass = Xass0
    # Stage 1: find the inflexion point of P(rho)
    bracket = [rho_lb, rho_ub]
    if flag == 1:
        # minimum of dP/drho == inflexion of P
        sol_inf = minimize_scalar(dPsaft_fun, args=(x, temp_aux, saft),
                                  bounds=bracket, method='Bounded',
                                  options={'xatol': 1e-1})
        rho_inf = sol_inf.x
        dP_inf = sol_inf.fun
        # dP/drho < 0 at the inflexion means a van-der-Waals loop exists
        if dP_inf > 0:
            flag = 3
        else:
            flag = 2
    # Stage 2: bracket the extremum (dP/drho = 0) on the requested branch
    if flag == 2:
        if state == 'L':
            bracket[0] = rho_inf
        elif state == 'V':
            bracket[1] = rho_inf
        rho_ext = brentq(dPsaft_fun, bracket[0], bracket[1],
                         args=(x, temp_aux, saft), xtol=1e-2)
        P_ext, dP_ext, Xass = saft.dP_drho_aux(x, rho_ext, temp_aux, Xass)
        # keep only the side of the extremum that can contain the root;
        # otherwise the requested phase does not exist at this P (flag -1)
        if P_ext > P and state == 'V':
            bracket[1] = rho_ext
        elif P_ext < P and state == 'L':
            bracket[0] = rho_ext
        else:
            flag = -1
    if flag == -1:
        rho = np.nan
    else:
        rho, Xass = density_newton_lim(bracket[0], bracket[1], x, temp_aux,
                                       P, Xass, saft)
        # rho = brentq(Psaft_obj, bracket[0], bracket[1],
        #              args=(x, temp_aux, saft, P))
    return rho, Xass
def density_newton(rho0, x, temp_aux, P, Xass0, saft):
    """Plain (unbracketed) Newton iteration for the density at pressure ``P``.

    Starts from ``rho0`` and runs at most 15 Newton steps, stopping early
    once the step size drops below 1e-6.  Returns (rho, Xass).
    """
    guess = 1. * rho0  # arithmetic copy so the caller's value is untouched
    press, dpress, Xass = saft.dP_drho_aux(x, guess, temp_aux, Xass0)
    for _ in range(15):
        step = (press - P) / dpress
        guess -= step
        if np.abs(step) < 1e-6:
            break
        press, dpress, Xass = saft.dP_drho_aux(x, guess, temp_aux, Xass)
    return guess, Xass
| 28.95 | 78 | 0.55243 | from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.optimize import minimize_scalar, brentq
from ..constants import Na
def dPsaft_fun(rho, x, temp_aux, saft):
rhomolecular = Na * rho
global Xass
da, Xass = saft.d2afcn_drho_aux(x, rhomolecular, temp_aux, Xass)
afcn, dafcn, d2afcn = da
dPsaft = 2 * rhomolecular * dafcn + rhomolecular**2 * d2afcn
return dPsaft
def Psaft_obj(rho, x, temp_aux, saft, Pspec):
rhomolecular = Na * rho
global Xass
da, Xass = saft.dafcn_drho_aux(x, rhomolecular, temp_aux, Xass)
afcn, dafcn = da
Psaft = rhomolecular**2 * dafcn / Na
return Psaft - Pspec
def density_newton_lim(rho_a, rho_b, x, temp_aux, P, Xass0, saft):
rho = (rho_a + rho_b) / 2
Psaft, dPsaft, Xass = saft.dP_drho_aux(x, rho, temp_aux, Xass0)
for i in range(15):
rho_old = rho
FO = Psaft - P
dFO = dPsaft
drho = FO/dFO
rho_new = rho - drho
if FO > 0:
rho_b = rho
else:
rho_a = rho
if rho_a < rho_new < rho_b:
rho = rho_new
else:
rho = (rho_a + rho_b) / 2
if np.abs(rho - rho_old) < 1e-6:
break
Psaft, dPsaft, Xass = saft.dP_drho_aux(x, rho, temp_aux, Xass)
return rho, Xass
def density_topliss(state, x, temp_aux, P, Xass0, saft):
if state != 'L' and state != 'V':
raise Warning("Not valid state. 'L' for liquid and 'V' for vapor.")
beta = temp_aux[0]
rho_lb = 1e-5
dP_lb = Na / beta
etamax = 0.7405
rho_lim = (6 * etamax) / np.dot(x, (saft.ms * np.pi * saft.sigma**3)) / Na
ub_sucess = False
rho_ub = 0.4 * rho_lim
it = 0
P_ub, dP_ub, Xass_ub = saft.dP_drho_aux(x, rho_ub, temp_aux, Xass0)
while not ub_sucess and it < 5:
it += 1
P_ub, dP_ub, Xass_ub = saft.dP_drho_aux(x, rho_ub, temp_aux, Xass_ub)
rho_ub += 0.15 * rho_lim
ub_sucess = P_ub > P and dP_ub > 0
rho_lb1 = 1e-4 * rho_lim
P_lb1, dP_lb1, Xass_lb = saft.dP_drho_aux(x, rho_lb1, temp_aux, Xass0)
d2P_lb1 = (dP_lb1 - dP_lb) / rho_lb1
if d2P_lb1 > 0:
flag = 3
else:
flag = 1
global Xass
Xass = Xass0
bracket = [rho_lb, rho_ub]
if flag == 1:
sol_inf = minimize_scalar(dPsaft_fun, args=(x, temp_aux, saft),
bounds=bracket, method='Bounded',
options={'xatol': 1e-1})
rho_inf = sol_inf.x
dP_inf = sol_inf.fun
if dP_inf > 0:
flag = 3
else:
flag = 2
if flag == 2:
if state == 'L':
bracket[0] = rho_inf
elif state == 'V':
bracket[1] = rho_inf
rho_ext = brentq(dPsaft_fun, bracket[0], bracket[1],
args=(x, temp_aux, saft), xtol=1e-2)
P_ext, dP_ext, Xass = saft.dP_drho_aux(x, rho_ext, temp_aux, Xass)
if P_ext > P and state == 'V':
bracket[1] = rho_ext
elif P_ext < P and state == 'L':
bracket[0] = rho_ext
else:
flag = -1
if flag == -1:
rho = np.nan
else:
rho, Xass = density_newton_lim(bracket[0], bracket[1], x, temp_aux,
P, Xass, saft)
return rho, Xass
def density_newton(rho0, x, temp_aux, P, Xass0, saft):
rho = 1.*rho0
Psaft, dPsaft, Xass = saft.dP_drho_aux(x, rho, temp_aux, Xass0)
for i in range(15):
FO = Psaft - P
dFO = dPsaft
drho = FO/dFO
rho -= drho
if np.abs(drho) < 1e-6:
break
Psaft, dPsaft, Xass = saft.dP_drho_aux(x, rho, temp_aux, Xass)
return rho, Xass
| true | true |
f73b16d4661501fd2f3412b2566c9b04393dbe8e | 1,609 | py | Python | src/huobi/huobi_ws_factory.py | EzePze/l3_data_collection | ec147fec0308f83b479878bc93ed6111aa0f5ea2 | [
"MIT"
] | 1 | 2022-01-28T11:41:55.000Z | 2022-01-28T11:41:55.000Z | src/huobi/huobi_ws_factory.py | EzePze/l3_data_collection | ec147fec0308f83b479878bc93ed6111aa0f5ea2 | [
"MIT"
] | null | null | null | src/huobi/huobi_ws_factory.py | EzePze/l3_data_collection | ec147fec0308f83b479878bc93ed6111aa0f5ea2 | [
"MIT"
] | null | null | null | from websocket_manager import WebsocketManager
class HuobiWsManagerFactory():
    """Builds WebsocketManager pairs for Huobi market-data feeds."""
    def get_ws_manager(self, symbol: str):
        """Return (book_ws_manager, trades_ws_manager) for ``symbol``.

        The order-book manager subscribes to the ``market.<symbol>.mbp.400``
        incremental channel and immediately issues a ``req`` for a full
        snapshot; the trades manager subscribes to
        ``market.<symbol>.trade.detail``.  Matching unsubscribe callbacks are
        supplied so the managers can tear the channels down.
        """
        book_url = "wss://api-aws.huobi.pro/feed"
        trades_url = 'wss://api-aws.huobi.pro/ws'
        # Subscribe to channels
        def subscribe_book(ws_manager):
            request = {'sub': f'market.{symbol}.mbp.400',
                       'id': 'id1'
                       }
            ws_manager.send_json(request)
            # re-send the same payload as a 'req' to pull an initial snapshot
            request['req'] = request.pop('sub')
            ws_manager.send_json(request)
        def subscribe_trades(ws_manager):
            request = {'sub': f'market.{symbol}.trade.detail',
                       'id': 'id1'
                       }
            ws_manager.send_json(request)
        # Unsubscribe from channels
        def unsubscribe_book(ws_manager):
            request = {'unsub': f'market.{symbol}.mbp.400',
                       'id': 'id1'
                       }
            ws_manager.send_json(request)
        def unsubscribe_trades(ws_manager):
            request = {'unsub': f'market.{symbol}.trade.detail',
                       'id': 'id1'
                       }
            ws_manager.send_json(request)
        trades_ws_manager = WebsocketManager(trades_url, subscribe_trades,unsubscribe_trades)
        book_ws_manager = WebsocketManager(book_url, subscribe_book, unsubscribe_book)
        return book_ws_manager, trades_ws_manager
def main():
    """Open the Huobi BTC/USDT feeds and idle until interrupted (Ctrl-C).

    Fixes over the original: the idle loop sleeps instead of busy-waiting at
    100% CPU, and KeyboardInterrupt is caught around the whole loop rather
    than inside it around a no-op.
    """
    import time  # local import: only needed for the idle loop
    ws = HuobiWsManagerFactory().get_ws_manager("btcusdt")
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
if __name__ == "__main__":
main() | 30.942308 | 93 | 0.572405 | from websocket_manager import WebsocketManager
class HuobiWsManagerFactory():
def get_ws_manager(self, symbol: str):
book_url = "wss://api-aws.huobi.pro/feed"
trades_url = 'wss://api-aws.huobi.pro/ws'
def subscribe_book(ws_manager):
request = {'sub': f'market.{symbol}.mbp.400',
'id': 'id1'
}
ws_manager.send_json(request)
request['req'] = request.pop('sub')
ws_manager.send_json(request)
def subscribe_trades(ws_manager):
request = {'sub': f'market.{symbol}.trade.detail',
'id': 'id1'
}
ws_manager.send_json(request)
def unsubscribe_book(ws_manager):
request = {'unsub': f'market.{symbol}.mbp.400',
'id': 'id1'
}
ws_manager.send_json(request)
def unsubscribe_trades(ws_manager):
request = {'unsub': f'market.{symbol}.trade.detail',
'id': 'id1'
}
ws_manager.send_json(request)
trades_ws_manager = WebsocketManager(trades_url, subscribe_trades,unsubscribe_trades)
book_ws_manager = WebsocketManager(book_url, subscribe_book, unsubscribe_book)
return book_ws_manager, trades_ws_manager
def main():
ws = HuobiWsManagerFactory().get_ws_manager("btcusdt")
while True:
try:
pass
except KeyboardInterrupt:
break
if __name__ == "__main__":
main() | true | true |
f73b175257de4b4e2c28386e0fbbb8b6ba20c386 | 3,956 | py | Python | align_rudder/run_eight_alignrudder.py | ml-jku/align-rudder | 26cf4b62a713e180063cefc2921981484ebb9165 | [
"MIT"
] | 12 | 2020-09-30T08:15:44.000Z | 2021-12-22T03:36:33.000Z | align_rudder/run_eight_alignrudder.py | ml-jku/align-rudder | 26cf4b62a713e180063cefc2921981484ebb9165 | [
"MIT"
] | null | null | null | align_rudder/run_eight_alignrudder.py | ml-jku/align-rudder | 26cf4b62a713e180063cefc2921981484ebb9165 | [
"MIT"
] | 1 | 2020-12-09T21:33:28.000Z | 2020-12-09T21:33:28.000Z | import ray
from ray import tune
import gym
from align_rudder.learning.q_learning import Qlearning
import numpy as np
import random
import os
import pkg_resources
import shutil
# Hyper-parameters for one align-rudder run on the EightRooms environment.
# Grid-searched keys ('num_demo_use', 'seed') are expanded by ray.tune.
config = {
    'env_id': 'align_rudder:EightRooms-v0',  # environment for the experiment
    'exp_name': 'align-rudder',  # name of the experiment
    'gamma': 1.0,  # Discount factor for q learning algorithm
    'total_timesteps': 10000000,
    'max_episodes': 100000,
    'learning_rate': 0.1,
    'epsilon': 0.2,  # exploration constant
    'num_seq_store': 10,  # max sequences to use for alignment or storing
    'num_clusters': 15,  # Number of clusters to use in k-means
    'consensus_thresh': 0.9,  # Threshold for consensus
    'eval': 40,
    'top_n': 12,
    'rudder': True,  # Use rudder or not
    'mode': 'log',  # 'log' or 'exp'
    'stop_criteria': '80opt',
    'enough_seq': 3,  # How many sequences are enough for sequence alignment
    'num_demo_use': tune.grid_search([2, 5, 10, 50, 100]),  # number of demonstrations
    'consensus_type': 'all',  # Select between most common or threshold all sequences: all, most_common
    'cluster_type': 'AP',  # Use default clustering, SpectralClustering, AffinityPropogation: default, SC, AP
    'seed': tune.grid_search([i for i in range(10)]),  # Seed for experiment
    'anneal_eps': 1.0,  # annealing rate for exploration
    'eps_lb': 0.0,  # eps anneal lower bound
    'rr_thresh': 0.005,  # Inverse visitation freq below thresh, set rr to zero
    'log_every': 10,  # log every timesteps
    'normalise_rr_by_max': True,  # normalize rr by maximum reward in rr
    'normalisation_scale': 10,  # scale factor compared to original reward
    'use_succ': True,
    'use_demo': True,
    'demo_path': 'demonstrations/eight_rooms.npy',
    # NOTE(review): key below carries a stray ':' in its name; 'run' never
    # reads it, so it is effectively dead — keep in sync if ever used.
    'update_alignment:': False,
    'max_reward': 1,
    'use_exp_replay': False,
    'memory_len': 30000,
    'init_mean': False,
    'use_new_form': True
}
def run(config):
    """Run one tune trial: build the env, seed RNGs, train align-rudder Q-learning.

    config -- resolved hyper-parameter dict (grid-search values already
              filled in by ray.tune).
    """
    run_path = os.getcwd()  # tune gives each trial its own working directory
    env_id = config['env_id']
    env = gym.make(env_id)
    # seed global RNGs for reproducibility of this trial
    np.random.seed(config['seed'])
    random.seed(config['seed'])
    # resolve the packaged demonstrations file to an absolute path
    demo_path = pkg_resources.resource_filename("align_rudder", config["demo_path"])
    rl = Qlearning(env=env, eps=config['epsilon'], alpha=config['learning_rate'],
                   total_timesteps=config['total_timesteps'],
                   num_store_seq=config['num_seq_store'], rudder=config['rudder'], enough_seq=config['enough_seq'],
                   num_clusters=config['num_clusters'], top_n=config['top_n'],
                   consensus_type=config['consensus_type'],
                   consensus_thresh=config['consensus_thresh'], cluster_type=config['cluster_type'],
                   run_path=run_path,
                   anneal_eps=config['anneal_eps'], eps_lb=config['eps_lb'], rr_thresh=config['rr_thresh'],
                   log_every=config['log_every'], normalise_rr_by_max=config['normalise_rr_by_max'],
                   normalisation_scale=config['normalisation_scale'], use_succ=config['use_succ'],
                   use_demo=config['use_demo'],
                   demo_path=demo_path,
                   num_demo_use=config['num_demo_use'],
                   max_episodes=config['max_episodes'], max_reward=config['max_reward'],
                   mode=config['mode'],
                   gamma=config['gamma'], stop_criteria=config['stop_criteria'], seed=config['seed'],
                   init_mean=config['init_mean'], use_new_form=config['use_new_form'])
    rl.learn()
if __name__ == "__main__":
# clear output dir
if os.path.exists(os.path.join("results", "eight_rooms_alignrudder")):
shutil.rmtree(os.path.join("results", "eight_rooms_alignrudder"))
ray.init(temp_dir='/tmp/ray-eight-align', log_to_driver=False)
print("Starting Runs...")
# run(config)
tune.run(run, config=config, local_dir="results/", name="eight_rooms_alignrudder")
print("Finished!")
| 43.955556 | 115 | 0.653943 | import ray
from ray import tune
import gym
from align_rudder.learning.q_learning import Qlearning
import numpy as np
import random
import os
import pkg_resources
import shutil
config = {
'env_id': 'align_rudder:EightRooms-v0',
'exp_name': 'align-rudder',
'gamma': 1.0,
'total_timesteps': 10000000,
'max_episodes': 100000,
'learning_rate': 0.1,
'epsilon': 0.2,
'num_seq_store': 10,
'num_clusters': 15,
'consensus_thresh': 0.9,
'eval': 40,
'top_n': 12,
'rudder': True,
'mode': 'log',
'stop_criteria': '80opt',
'enough_seq': 3,
'num_demo_use': tune.grid_search([2, 5, 10, 50, 100]),
'consensus_type': 'all',
'cluster_type': 'AP',
'seed': tune.grid_search([i for i in range(10)]),
'anneal_eps': 1.0,
'eps_lb': 0.0,
'rr_thresh': 0.005,
'log_every': 10,
'normalise_rr_by_max': True,
'normalisation_scale': 10,
'use_succ': True,
'use_demo': True,
'demo_path': 'demonstrations/eight_rooms.npy',
'update_alignment:': False,
'max_reward': 1,
'use_exp_replay': False,
'memory_len': 30000,
'init_mean': False,
'use_new_form': True
}
def run(config):
run_path = os.getcwd()
env_id = config['env_id']
env = gym.make(env_id)
np.random.seed(config['seed'])
random.seed(config['seed'])
demo_path = pkg_resources.resource_filename("align_rudder", config["demo_path"])
rl = Qlearning(env=env, eps=config['epsilon'], alpha=config['learning_rate'],
total_timesteps=config['total_timesteps'],
num_store_seq=config['num_seq_store'], rudder=config['rudder'], enough_seq=config['enough_seq'],
num_clusters=config['num_clusters'], top_n=config['top_n'],
consensus_type=config['consensus_type'],
consensus_thresh=config['consensus_thresh'], cluster_type=config['cluster_type'],
run_path=run_path,
anneal_eps=config['anneal_eps'], eps_lb=config['eps_lb'], rr_thresh=config['rr_thresh'],
log_every=config['log_every'], normalise_rr_by_max=config['normalise_rr_by_max'],
normalisation_scale=config['normalisation_scale'], use_succ=config['use_succ'],
use_demo=config['use_demo'],
demo_path=demo_path,
num_demo_use=config['num_demo_use'],
max_episodes=config['max_episodes'], max_reward=config['max_reward'],
mode=config['mode'],
gamma=config['gamma'], stop_criteria=config['stop_criteria'], seed=config['seed'],
init_mean=config['init_mean'], use_new_form=config['use_new_form'])
rl.learn()
if __name__ == "__main__":
if os.path.exists(os.path.join("results", "eight_rooms_alignrudder")):
shutil.rmtree(os.path.join("results", "eight_rooms_alignrudder"))
ray.init(temp_dir='/tmp/ray-eight-align', log_to_driver=False)
print("Starting Runs...")
tune.run(run, config=config, local_dir="results/", name="eight_rooms_alignrudder")
print("Finished!")
| true | true |
f73b17d7a70bc3173587b91403758bdf09bcb35b | 236 | py | Python | Session3-Conditional-Statements-part2/02_weekend_or_work_day.py | elenaborisova/Crack-the-Code | d0b505ebad878d5228d98c934779ed9b28f6c034 | [
"MIT"
] | null | null | null | Session3-Conditional-Statements-part2/02_weekend_or_work_day.py | elenaborisova/Crack-the-Code | d0b505ebad878d5228d98c934779ed9b28f6c034 | [
"MIT"
] | null | null | null | Session3-Conditional-Statements-part2/02_weekend_or_work_day.py | elenaborisova/Crack-the-Code | d0b505ebad878d5228d98c934779ed9b28f6c034 | [
"MIT"
] | 1 | 2021-05-31T14:47:53.000Z | 2021-05-31T14:47:53.000Z | day = input()
# Classify a day name (read from stdin above) as a work day or weekend day.
# Tuple membership replaces the original chain of `==`/`or` comparisons.
if day in ("Monday", "Tuesday", "Wednesday", "Thursday", "Friday"):
    print("Work day")
elif day in ("Saturday", "Sunday"):
    print("Weekend")
else:
    print("Error")
| 26.222222 | 102 | 0.563559 | day = input()
if day == "Monday" or day == "Tuesday" or day == "Wednesday" or day == "Thursday" or day == "Friday":
print("Work day")
elif day == "Saturday" or day == "Sunday":
print("Weekend")
else:
print("Error")
| true | true |
f73b1866e507c7db610e0f505647e42453e96a22 | 20,672 | py | Python | nbdt/utils.py | lisadunlap/explainable-nbdt | e045bfd0b55b21fd87c9a233b73a0ca77672efff | [
"MIT"
] | 1 | 2021-08-28T20:17:50.000Z | 2021-08-28T20:17:50.000Z | nbdt/utils.py | lisadunlap/explainable-nbdt | e045bfd0b55b21fd87c9a233b73a0ca77672efff | [
"MIT"
] | null | null | null | nbdt/utils.py | lisadunlap/explainable-nbdt | e045bfd0b55b21fd87c9a233b73a0ca77672efff | [
"MIT"
] | null | null | null | '''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import os
import sys
import time
import math
import numpy as np
from numpy import linalg as LA
import torch
import torch.nn as nn
import torch.nn.init as init
import torchvision.transforms as transforms
from gensim.models import Word2Vec
from pathlib import Path
# tree-generation consntants
METHODS = ('prune', 'wordnet', 'random', 'image', 'induced', 'self-induced', 'clustered', 'extra_paths', 'weighted',
'replace_node', 'insert_node', 'induced-attributes', 'prettify')
DATASETS = ('CIFAR10', 'CIFAR10IncludeClasses', 'CIFAR100', 'TinyImagenet200', 'TinyImagenet200IncludeClasses', 'Imagenet1000',
'TinyImagenet200CombineClasses', 'MiniPlaces', 'AnimalsWithAttributes2', 'CUB2011', 'MiniImagenet')
DATASET_TO_FOLDER_NAME = {
'CIFAR10': 'CIFAR10',
'CIFAR10ExcludeLabels': 'CIFAR10-zeroshot',
'CIFAR10ExcludeClasses': 'CIFAR10',
'CIFAR10IncludeLabels': 'CIFAR10-zeroshot',
'CIFAR10IncludeClasses': 'CIFAR10',
'CIFAR100': 'CIFAR100',
'TinyImagenet200': 'tiny-imagenet-200',
'TinyImagenet200IncludeClasses': 'tiny-imagenet-200-custom',
'Imagenet1000' : 'imagenet-1000',
'TinyImagenet200CombineClasses': 'tiny-imagenet-200-custom-combined',
'MiniPlaces': 'miniplaces',
'AnimalsWithAttributes2': 'Animals_with_Attributes2',
'CUB2011': 'CUB_200_2011',
'MiniImagenet': 'mini-imagenet'
}
# main script constants
CIFAR10PATHSANITY = 'CIFAR10PathSanity'
DEFAULT_CIFAR10_TREE = './data/CIFAR10/graph-wordnet-single.json'
DEFAULT_CIFAR10_WNIDS = './data/CIFAR10/wnids.txt'
DEFAULT_CIFAR100_TREE = './data/CIFAR100/graph-wordnet-single.json'
DEFAULT_CIFAR100_WNIDS = './data/CIFAR100/wnids.txt'
DEFAULT_TINYIMAGENET200_TREE = './data/tiny-imagenet-200/graph-wordnet-single.json'
DEFAULT_TINYIMAGENET200_WNIDS = './data/tiny-imagenet-200/wnids.txt'
DEFAULT_IMAGENET1000_TREE = './data/imagenet-1000/graph-wordnet-single.json'
DEFAULT_IMAGENET1000_WNIDS = './data/imagenet-1000/wnids.txt'
DEFAULT_MINIPLACES_TREE = '/data/miniplaces/graph-default.json'
DEFAULT_MINIPLACES_WNID = './data/miniplaces/wnids.txt'
DEFAULT_AWA2_TREE = '/data/Animals_with_Attributes2/graph-default.json'
DEFAULT_AWA2_WNID = './data/Animals_with_Attributes2/wnids.txt'
DEFAULT_CUB_TREE = '/data/CUB_200_2011/graph-default.json'
DEFAULT_CUB_WNID = './data/CUB_200_2011/wnids.txt'
DEFAULT_MiniImagenet_TREE = './data/mini-imagenet/graph-default.json'
DEFAULT_MiniImagenet_WNID = './data/mini-imagenet/wnids.txt'
DATASET_TO_PATHS = {
'CIFAR10': {
'path_graph': DEFAULT_CIFAR10_TREE,
'path_wnids': DEFAULT_CIFAR10_WNIDS
},
'CIFAR100': {
'path_graph': DEFAULT_CIFAR100_TREE,
'path_wnids': DEFAULT_CIFAR100_WNIDS
},
'TinyImagenet200': {
'path_graph': DEFAULT_TINYIMAGENET200_TREE,
'path_wnids': DEFAULT_TINYIMAGENET200_WNIDS
},
'Imagenet1000': {
'path_graph': DEFAULT_IMAGENET1000_TREE,
'path_wnids': DEFAULT_IMAGENET1000_WNIDS
},
'MiniPlaces': {
'path_graph': DEFAULT_MINIPLACES_TREE,
'path_wnids': DEFAULT_MINIPLACES_WNID
},
'AnimalsWithAttributes2': {
'path_graph': DEFAULT_AWA2_TREE,
'path_wnids': DEFAULT_AWA2_WNID
},
'CUB2011': {
'path_graph': DEFAULT_CUB_TREE,
'path_wnids': DEFAULT_CUB_WNID
},
'MiniImagenet': {
'path_graph': DEFAULT_MiniImagenet_TREE,
'path_wnids': DEFAULT_MiniImagenet_WNID
}
}
WORD2VEC_NAMES_TO_MODEL = {
'wiki': {
'name': 'glove-wiki-gigaword-300',
'dim': 300
},
'wiki-300': {
'name': 'glove-wiki-gigaword-300',
'dim': 300
},
'wiki-200': {
'name': 'glove-wiki-gigaword-200',
'dim': 200
},
'wiki-100': {
'name': 'glove-wiki-gigaword-100',
'dim': 100
},
'wiki-50': {
'name': 'glove-wiki-gigaword-50',
'dim': 50
},
'twitter': {
'name': 'glove-twitter-200',
'dim': 200
}
}
def populate_kwargs(args, kwargs, object, name='Dataset', keys=(), globals={}):
    """Copy supported CLI arguments from `args` into `kwargs`.

    For each key the target `object` opts in via an `accepts_<key>` class
    attribute: a callable computes the value from `globals`; a truthy flag
    copies the value from `args`; otherwise a warning is printed when the
    user supplied a value the object cannot use.

    NOTE(review): parameters `object` and `globals` shadow builtins, and the
    mutable default `globals={}` is shared across calls (read-only here).
    """
    for key in keys:
        accepts_key = getattr(object, f'accepts_{key}', False)
        if not accepts_key:
            continue
        assert key in args or callable(accepts_key)
        value = getattr(args, key, None)
        if callable(accepts_key):
            # Callable hook wins: compute the value instead of reading args.
            kwargs[key] = accepts_key(**globals)
            Colors.cyan(f'{key}:\t(callable)')
        elif accepts_key and value:
            kwargs[key] = value
            Colors.cyan(f'{key}:\t{value}')
        elif value:
            Colors.red(
                f'Warning: {name} does not support custom '
                f'{key}: {value}')
def get_transform_from_name(dataset_name, dataset, input_size):
    """Return (transform_train, transform_test) for the given dataset.

    Defaults to the standard CIFAR augmentation pipeline and overrides it for
    datasets that define their own transform factories.

    :param dataset_name: dataset key, e.g. 'CIFAR10' or 'MiniImagenet'.
    :param dataset: dataset class/instance exposing transform_train and
        transform_val / transform_test factories for the non-CIFAR branches.
    :param input_size: desired input resolution; falls back to a per-dataset
        default when falsy.
    """
    # CIFAR-style default: random crop + flip with CIFAR channel statistics.
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    if dataset_name in ('TinyImagenet200', 'Imagenet1000', 'CUB2011'):
        default_input_size = 64 if 'TinyImagenet200' in dataset_name else 224
        input_size = input_size or default_input_size
        transform_train = dataset.transform_train(input_size)
        transform_test = dataset.transform_val(input_size)

    # BUG FIX: the original tested `dataset_name in ('MiniImagenet')`.
    # ('MiniImagenet') is just a string (no tuple), so that was substring
    # containment — e.g. 'Mini' or 'Imagenet' also matched. Use equality.
    if dataset_name == 'MiniImagenet':
        default_input_size = 84
        input_size = input_size or default_input_size
        transform_train = dataset.transform_train(input_size)
        transform_test = dataset.transform_val(input_size)

    if dataset_name in ('MiniPlaces', 'AnimalsWithAttributes2'):
        transform_train = dataset.transform_train()
        transform_test = dataset.transform_test()

    return transform_train, transform_test
class Colors:
    """ANSI escape codes plus helpers that print colorized lines."""
    RED = '\x1b[31m'
    GREEN = '\x1b[32m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    CYAN = '\x1b[36m'

    @classmethod
    def _emit(cls, code, args):
        # Prefix the first argument with the escape code and append the
        # reset sequence so subsequent output keeps the default style.
        print(code + args[0], *args[1:], cls.ENDC)

    @classmethod
    def red(cls, *args):
        cls._emit(cls.RED, args)

    @classmethod
    def green(cls, *args):
        cls._emit(cls.GREEN, args)

    @classmethod
    def cyan(cls, *args):
        cls._emit(cls.CYAN, args)

    @classmethod
    def bold(cls, *args):
        cls._emit(cls.BOLD, args)
def get_mean_and_std(dataset):
    """Compute per-channel mean and std over a dataset of 3-channel images.

    Averages the per-image channel statistics, one image per batch.
    """
    loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
    mean = torch.zeros(3)
    std = torch.zeros(3)
    print('==> Computing mean and std..')
    for inputs, _targets in loader:
        for channel in range(3):
            mean[channel] += inputs[:, channel, :, :].mean()
            std[channel] += inputs[:, channel, :, :].std()
    # Turn the accumulated per-image statistics into dataset averages.
    mean /= len(dataset)
    std /= len(dataset)
    return mean, std
def init_params(net):
    """Initialize layer parameters in place.

    Conv layers get Kaiming-normal weights, batch-norm layers unit weight and
    zero bias, linear layers small-std normal weights; all biases are zeroed.
    """
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            # The underscore in-place initializers replace the long-deprecated
            # aliases (kaiming_normal / constant / normal) used originally.
            init.kaiming_normal_(m.weight, mode='fan_out')
            # BUG FIX: `if m.bias:` raises "Boolean value of Tensor with more
            # than one element is ambiguous" for any multi-channel bias.
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
# Probe the terminal width via `stty size` so the progress bar fills the
# screen; fall back to a fixed width when stdout is not attached to a TTY
# (e.g. piped output or a non-interactive environment).
try:
    _, term_width = os.popen('stty size', 'r').read().split()
    term_width = int(term_width)
except Exception as e:
    print(e)
    term_width = 50
TOTAL_BAR_LENGTH = 65.  # character budget for the bar itself
last_time = time.time()  # timestamp of the previous progress_bar() call
begin_time = last_time  # timestamp when the current bar was started
def progress_bar(current, total, msg=None):
    """Render a single-line textual progress bar to stdout.

    :param current: zero-based index of the step just completed.
    :param total: total number of steps.
    :param msg: optional status text appended after the step/total timings.
    """
    global last_time, begin_time
    if current == 0:
        begin_time = time.time()  # Reset for new bar.

    # Split the bar into completed ('='), head ('>') and remaining ('.').
    cur_len = int(TOTAL_BAR_LENGTH*current/total)
    rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1

    sys.stdout.write(' [')
    for i in range(cur_len):
        sys.stdout.write('=')
    sys.stdout.write('>')
    for i in range(rest_len):
        sys.stdout.write('.')
    sys.stdout.write(']')

    cur_time = time.time()
    step_time = cur_time - last_time  # wall time of this single step
    last_time = cur_time
    tot_time = cur_time - begin_time  # wall time since the bar started

    L = []
    L.append('  Step: %s' % format_time(step_time))
    L.append(' | Tot: %s' % format_time(tot_time))
    if msg:
        L.append(' | ' + msg)

    msg = ''.join(L)
    sys.stdout.write(msg)
    # Pad with spaces so leftovers from a longer previous line are erased.
    for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
        sys.stdout.write(' ')

    # Go back to the center of the bar.
    for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
        sys.stdout.write('\b')
    sys.stdout.write(' %d/%d ' % (current+1, total))

    # Carriage return keeps rewriting the same line until the final step.
    if current < total-1:
        sys.stdout.write('\r')
    else:
        sys.stdout.write('\n')
    sys.stdout.flush()
def format_time(seconds):
    """Render a duration in seconds as a compact string of at most two units.

    Examples: 3661 -> '1h1m', 1.5 -> '1s500ms', 0 -> '0ms'.
    """
    remainder = seconds
    days = int(remainder / 3600 / 24)
    remainder = remainder - days * 3600 * 24
    hours = int(remainder / 3600)
    remainder = remainder - hours * 3600
    minutes = int(remainder / 60)
    remainder = remainder - minutes * 60
    whole_seconds = int(remainder)
    millis = int((remainder - whole_seconds) * 1000)

    # Emit the two most significant non-zero units, largest first.
    pieces = []
    for amount, suffix in ((days, 'D'), (hours, 'h'), (minutes, 'm'),
                           (whole_seconds, 's'), (millis, 'ms')):
        if amount > 0 and len(pieces) < 2:
            pieces.append('%d%s' % (amount, suffix))
    return ''.join(pieces) if pieces else '0ms'
def set_np_printoptions():
    """Print numpy floats with exactly three decimal places (e.g. 0.333)."""
    np.set_printoptions(formatter={'float': lambda value: f"{value:0.3f}"})
def generate_fname(dataset, model, path_graph, wnid=None, name='',
                   trainset=None, include_labels=(), exclude_labels=(),
                   include_classes=(), num_samples=0, max_leaves_supervised=-1,
                   min_leaves_supervised=-1, tree_supervision_weight=0.5,
                   weighted_average=False, fine_tune=False,
                   loss='CrossEntropyLoss', word2vec=False, **kwargs):
    """Build a checkpoint filename encoding the run's hyperparameters.

    Non-default settings are appended as dash-separated tokens so the
    resulting name uniquely identifies the configuration.
    """
    parts = ['ckpt', dataset, model]
    if name:
        parts.append(name)
    if path_graph:
        # Strip the 'graph-' prefix from the graph file's stem.
        parts.append(Path(path_graph).stem.replace('graph-', '', 1))
    if include_labels:
        parts.append('incl' + ','.join(map(str, include_labels)))
    if exclude_labels:
        parts.append('excl' + ','.join(map(str, exclude_labels)))
    if include_classes:
        parts.append('incc' + ','.join(map(str, include_classes)))
    if num_samples != 0 and num_samples is not None:
        parts.append(f'samples{num_samples}')
    if loss != 'CrossEntropyLoss':
        parts.append(loss)
    if max_leaves_supervised > 0:
        parts.append(f'mxls{max_leaves_supervised}')
    if min_leaves_supervised > 0:
        parts.append(f'mnls{min_leaves_supervised}')
    if tree_supervision_weight is not None and tree_supervision_weight != 1:
        parts.append(f'tsw{tree_supervision_weight}')
    if weighted_average:
        parts.append('weighted')
    if word2vec:
        parts.append('word2vec')
    return '-'.join(parts)
def get_saved_word2vec(path, dimension, projection_matrix):
    """Load a saved word vector, project it, and return it L2-normalized.

    :param path: .npy file holding the raw embedding.
    :param dimension: embedding dimensionality (row-vector width).
    :param projection_matrix: (dimension, k) matrix applied to the vector.
    """
    raw = np.load(path)
    row = np.asarray(raw).reshape(1, dimension)
    projected = np.matmul(row, projection_matrix)[0]
    return np.array(projected / LA.norm(projected), dtype=float)
def get_word_embedding(cls, trainset, dataset_name='CIFAR10'):
    """Load the saved word2vec embedding for class `cls`, L2-normalized."""
    folder = os.path.join(trainset.root, DATASET_TO_FOLDER_NAME[dataset_name])
    embedding = np.load(os.path.join(folder, "word2vec/") + cls + '.npy')
    return embedding / LA.norm(embedding)
def word2vec_model(net, trainset, dataset_name='CIFAR10', exclude_classes=None, pretrained=False):
    """Replace the network's final FC layer with frozen word2vec embeddings.

    Loads one saved .npy embedding per class, L2-normalizes each, stacks them
    into the FC weight matrix, and freezes the layer so the backbone trains
    against fixed class embeddings.

    :param net: model (possibly wrapped in nn.DataParallel) whose final layer
        is `fc` (pretrained=True) or `linear` (pretrained=False).
    :param trainset: dataset exposing `.classes`, one embedding file each.
    :param dataset_name: key into DATASET_TO_FOLDER_NAME for the data folder.
    :param exclude_classes: unused here; kept for interface compatibility.
    :param pretrained: selects which attribute holds the final layer.
    :returns: `net` with its final layer replaced in place.
    """
    print('==> Adding in word2vec embeddings...')
    module = net.module if isinstance(net, nn.DataParallel) else net

    word2vec_path = os.path.join(os.path.join('./data', DATASET_TO_FOLDER_NAME[dataset_name]), "word2vec/")
    if not os.path.exists(word2vec_path):
        raise Exception("No saved word2vec embeddings, run generate_word2vec.py")

    # Stack one normalized embedding per class into an (n_classes, dim) matrix.
    rows = []
    for cls in trainset.classes:
        word_vec = np.load(word2vec_path + cls + '.npy')
        rows.append(word_vec / LA.norm(word_vec))
    fc_weights = np.stack(rows)

    layer = nn.Linear(fc_weights.shape[1], len(trainset.classes)).to("cuda")
    layer.weight = nn.Parameter(torch.from_numpy(fc_weights).float().to("cuda"))
    # Freeze: the embeddings act as a fixed classifier head.
    layer.weight.requires_grad = False
    layer.bias.requires_grad = False
    layer.requires_grad = False
    Colors.cyan("Freezing FC weights..")
    # BUG FIX: the original assigned the new layer to a local variable only,
    # so `net` was returned unmodified. Attach the frozen layer to the model.
    if pretrained:
        module.fc = layer
    else:
        module.linear = layer
    return net
def test_word2vec(net, trainset, dataset_name='CIFAR10', exclude_classes=None, dimension=300):
    """ Check that word2vec weights are frozen in ZS rows """
    # Sanity check: each checked FC row must equal the raw embedding saved on
    # disk. With exclude_classes given, only those rows are compared;
    # otherwise every class row is checked.
    word2vec_path = os.path.join(os.path.join('./data', DATASET_TO_FOLDER_NAME[dataset_name]), "word2vec/")
    if not os.path.exists(word2vec_path):
        raise Exception("No saved word2vec embeddings, run generate_word2vec.py")
    net.eval()
    # get FC weights
    fc_weights = net.module.linear.weight.detach().cpu().numpy()
    # if no exclude classes, all FC rows should be word2vec embeddings
    if not exclude_classes:
        for i, cls in enumerate(trainset.classes):
            # NOTE(review): the doubled `word_vec = word_vec =` is a harmless
            # typo; also these are the *raw* vectors — confirm callers expect
            # unnormalized rows here.
            word_vec = word_vec = np.load(word2vec_path+cls+'.npy')
            assert all(fc_weights[i] == word_vec)
    else:
        for i, cls in enumerate(exclude_classes):
            word_vec = word_vec = np.load(word2vec_path+cls+'.npy')
            assert all(fc_weights[i] == word_vec)
    Colors.cyan("Freezing certain FC rows check passed!")
def normalize_weights(net, pretrained=True):
    """Zero-center and L2-normalize each row of the final FC layer in place.

    NOTE(review): requires a CUDA device — the rebuilt weight tensor is moved
    to "cuda", matching the original implementation.
    """
    net.eval()
    layer = net.module.fc if pretrained else net.module.linear
    weights = layer.weight.detach().cpu().numpy()
    for row in range(len(weights)):
        weights[row] -= np.mean(weights[row])
        weights[row] /= LA.norm(weights[row])
    layer.weight = nn.Parameter(torch.from_numpy(weights).float().to("cuda"))
    layer.weight.requires_grad = False
    return net
class LabelSmoothingLoss(nn.Module):
    """Cross-entropy with label smoothing, optionally redistributing the
    smoothed mass from each seen class to a paired zero-shot class.

    :param classes: ordered list of class names (index = label id).
    :param smoothing: probability mass moved off the true class.
    :param dim: dimension over which to softmax.
    :param seen_to_zsl_cls: mapping seen-class-name -> zsl-class-name; when
        given, the smoothed mass goes to the paired class.
    """
    def __init__(self, classes, smoothing=0.0, dim=-1, seen_to_zsl_cls=None):
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.cls = classes
        self.dim = dim
        # Avoid the original mutable default argument ({}).
        self.seen_to_zsl_cls = seen_to_zsl_cls or {}

    def smooth_one_hot(self, labels):
        """Return the (batch, n_classes) soft-label matrix for `labels`."""
        assert 0 <= self.smoothing < 1
        num_classes = len(self.cls)
        label_shape = torch.Size((labels.size(0), num_classes))
        confidence = 1.0 - self.smoothing
        if self.smoothing == 0 or not self.seen_to_zsl_cls:
            # BUG FIX: the original called torch.zeros_like(label_shape),
            # which raises TypeError because label_shape is a torch.Size,
            # not a tensor.
            return torch.zeros(label_shape, device=labels.device).scatter_(
                1, labels.data.unsqueeze(1), confidence)
        with torch.no_grad():
            true_dist = torch.zeros(size=label_shape, device=labels.device)
            true_dist.scatter_(1, labels.data.unsqueeze(1), 1)
            for seen, zsl in self.seen_to_zsl_cls.items():
                zsl_idx, seen_idx = self.cls.index(zsl), self.cls.index(seen)
                # Rows labeled `seen` get `confidence` on the seen class and
                # `smoothing` on its paired zero-shot class.
                seen_selector = torch.zeros_like(labels.data.unsqueeze(1))
                seen_selector[true_dist[:, seen_idx] == 1] = seen_idx
                zsl_selector = torch.zeros_like(labels.data.unsqueeze(1))
                zsl_selector[true_dist[:, seen_idx] == 1] = zsl_idx
                true_dist.scatter_(1, seen_selector, confidence)
                true_dist.scatter_(1, zsl_selector, self.smoothing)
        return true_dist

    def forward(self, pred, target):
        """Mean soft-label cross-entropy between `pred` logits and `target` ids."""
        pred = pred.log_softmax(dim=self.dim)
        with torch.no_grad():
            soft_label = self.smooth_one_hot(target)
        return torch.mean(torch.sum(-soft_label * pred, dim=self.dim))
class MaskLoss(nn.Module):
    """Dice-style overlap loss between a predicted and a target mask.

    Per sample: 2 * <min(inp, tar), soft_mask> / sum, where soft_mask is a
    steep sigmoid that is ~1 where the target exceeds 55% of a reference max.
    NaN ratios (0/0 on empty masks) are zeroed before averaging.
    """
    def __init__(self, size_average=None, reduce=None, reduction='mean'):
        super(MaskLoss, self).__init__()
        self.reduction = reduction

    def forward(self, input, target):
        N, W = input.size()
        # Per-column maxima over the batch (dim 0). The original also computed
        # full-batch tensors A and B here that were never used; removed.
        # NOTE(review): `values[n]` below indexes a per-column max with the
        # sample index, and the denominator `inp + target` broadcasts against
        # the whole batch — both look suspicious but are preserved as-is;
        # confirm the intended semantics before changing them.
        values, _ = torch.max(target, 0)
        sums = []
        for n in range(N):
            value = values[n]
            tar = target[n]
            inp = input[n]
            a = torch.min(inp, tar)
            # Soft mask: ~1 where tar exceeds 55% of the reference max.
            b = 1 / (1 + torch.exp(-100 * (tar - .55 * value)))
            sums.append(2 * torch.div(torch.dot(a, b), torch.sum(inp + target, axis=-1)))
        sums = torch.stack(sums)
        # Guard against 0/0 when both masks are empty.
        sums[torch.isnan(sums)] = 0.0
        return sums.mean()
def replicate(inputs, labels):
    """Expand a multilabel batch into one (sample, class) pair per active label.

    inputs: torch Tensor size Bx(anything)
    labels: torch tensor size Bx(num_classes), multi-hot — labels[i, j] is 1
        if sample i carries class j.
    Returns (rep_inputs, rep_labels): each sample repeated once per class
    marked 1 in its label row, and the corresponding class indices.
    Returns (None, None) for an empty batch, matching the original behavior.

    Example:
        inputs = torch.zeros((2, 3))
        labels = torch.Tensor([[0, 1, 1, 0],
                               [0, 1, 0, 0]])
        rep_inputs, rep_labels = replicate(inputs, labels)
        # rep_inputs.shape == (3, 3); rep_labels == tensor([1, 2, 1])
    """
    trailing_dims = len(inputs.shape) - 1
    input_chunks = []
    label_chunks = []
    for sample, label in zip(inputs, labels):
        active = torch.where(label == 1.)[0]
        # Repeat the sample once per active class along a new leading dim.
        input_chunks.append(sample.unsqueeze(0).repeat(len(active), *([1] * trailing_dims)))
        label_chunks.append(active)
    if not input_chunks:
        return None, None
    return torch.cat(input_chunks), torch.cat(label_chunks)
def replicate_outputs(inputs, num_replicate):
    """Return a dict {0 .. num_replicate-1: independent clone of `inputs`}.

    Each value is a separate tensor clone, so consumers can mutate their copy
    without affecting the others.
    """
    return {copy_idx: inputs.clone() for copy_idx in range(num_replicate)}
import sys
import time
import math
import numpy as np
from numpy import linalg as LA
import torch
import torch.nn as nn
import torch.nn.init as init
import torchvision.transforms as transforms
from gensim.models import Word2Vec
from pathlib import Path
METHODS = ('prune', 'wordnet', 'random', 'image', 'induced', 'self-induced', 'clustered', 'extra_paths', 'weighted',
'replace_node', 'insert_node', 'induced-attributes', 'prettify')
DATASETS = ('CIFAR10', 'CIFAR10IncludeClasses', 'CIFAR100', 'TinyImagenet200', 'TinyImagenet200IncludeClasses', 'Imagenet1000',
'TinyImagenet200CombineClasses', 'MiniPlaces', 'AnimalsWithAttributes2', 'CUB2011', 'MiniImagenet')
DATASET_TO_FOLDER_NAME = {
'CIFAR10': 'CIFAR10',
'CIFAR10ExcludeLabels': 'CIFAR10-zeroshot',
'CIFAR10ExcludeClasses': 'CIFAR10',
'CIFAR10IncludeLabels': 'CIFAR10-zeroshot',
'CIFAR10IncludeClasses': 'CIFAR10',
'CIFAR100': 'CIFAR100',
'TinyImagenet200': 'tiny-imagenet-200',
'TinyImagenet200IncludeClasses': 'tiny-imagenet-200-custom',
'Imagenet1000' : 'imagenet-1000',
'TinyImagenet200CombineClasses': 'tiny-imagenet-200-custom-combined',
'MiniPlaces': 'miniplaces',
'AnimalsWithAttributes2': 'Animals_with_Attributes2',
'CUB2011': 'CUB_200_2011',
'MiniImagenet': 'mini-imagenet'
}
CIFAR10PATHSANITY = 'CIFAR10PathSanity'
DEFAULT_CIFAR10_TREE = './data/CIFAR10/graph-wordnet-single.json'
DEFAULT_CIFAR10_WNIDS = './data/CIFAR10/wnids.txt'
DEFAULT_CIFAR100_TREE = './data/CIFAR100/graph-wordnet-single.json'
DEFAULT_CIFAR100_WNIDS = './data/CIFAR100/wnids.txt'
DEFAULT_TINYIMAGENET200_TREE = './data/tiny-imagenet-200/graph-wordnet-single.json'
DEFAULT_TINYIMAGENET200_WNIDS = './data/tiny-imagenet-200/wnids.txt'
DEFAULT_IMAGENET1000_TREE = './data/imagenet-1000/graph-wordnet-single.json'
DEFAULT_IMAGENET1000_WNIDS = './data/imagenet-1000/wnids.txt'
DEFAULT_MINIPLACES_TREE = '/data/miniplaces/graph-default.json'
DEFAULT_MINIPLACES_WNID = './data/miniplaces/wnids.txt'
DEFAULT_AWA2_TREE = '/data/Animals_with_Attributes2/graph-default.json'
DEFAULT_AWA2_WNID = './data/Animals_with_Attributes2/wnids.txt'
DEFAULT_CUB_TREE = '/data/CUB_200_2011/graph-default.json'
DEFAULT_CUB_WNID = './data/CUB_200_2011/wnids.txt'
DEFAULT_MiniImagenet_TREE = './data/mini-imagenet/graph-default.json'
DEFAULT_MiniImagenet_WNID = './data/mini-imagenet/wnids.txt'
DATASET_TO_PATHS = {
'CIFAR10': {
'path_graph': DEFAULT_CIFAR10_TREE,
'path_wnids': DEFAULT_CIFAR10_WNIDS
},
'CIFAR100': {
'path_graph': DEFAULT_CIFAR100_TREE,
'path_wnids': DEFAULT_CIFAR100_WNIDS
},
'TinyImagenet200': {
'path_graph': DEFAULT_TINYIMAGENET200_TREE,
'path_wnids': DEFAULT_TINYIMAGENET200_WNIDS
},
'Imagenet1000': {
'path_graph': DEFAULT_IMAGENET1000_TREE,
'path_wnids': DEFAULT_IMAGENET1000_WNIDS
},
'MiniPlaces': {
'path_graph': DEFAULT_MINIPLACES_TREE,
'path_wnids': DEFAULT_MINIPLACES_WNID
},
'AnimalsWithAttributes2': {
'path_graph': DEFAULT_AWA2_TREE,
'path_wnids': DEFAULT_AWA2_WNID
},
'CUB2011': {
'path_graph': DEFAULT_CUB_TREE,
'path_wnids': DEFAULT_CUB_WNID
},
'MiniImagenet': {
'path_graph': DEFAULT_MiniImagenet_TREE,
'path_wnids': DEFAULT_MiniImagenet_WNID
}
}
WORD2VEC_NAMES_TO_MODEL = {
'wiki': {
'name': 'glove-wiki-gigaword-300',
'dim': 300
},
'wiki-300': {
'name': 'glove-wiki-gigaword-300',
'dim': 300
},
'wiki-200': {
'name': 'glove-wiki-gigaword-200',
'dim': 200
},
'wiki-100': {
'name': 'glove-wiki-gigaword-100',
'dim': 100
},
'wiki-50': {
'name': 'glove-wiki-gigaword-50',
'dim': 50
},
'twitter': {
'name': 'glove-twitter-200',
'dim': 200
}
}
def populate_kwargs(args, kwargs, object, name='Dataset', keys=(), globals={}):
for key in keys:
accepts_key = getattr(object, f'accepts_{key}', False)
if not accepts_key:
continue
assert key in args or callable(accepts_key)
value = getattr(args, key, None)
if callable(accepts_key):
kwargs[key] = accepts_key(**globals)
Colors.cyan(f'{key}:\t(callable)')
elif accepts_key and value:
kwargs[key] = value
Colors.cyan(f'{key}:\t{value}')
elif value:
Colors.red(
f'Warning: {name} does not support custom '
f'{key}: {value}')
def get_transform_from_name(dataset_name, dataset, input_size):
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
if dataset_name in ('TinyImagenet200', 'Imagenet1000', 'CUB2011'):
default_input_size = 64 if 'TinyImagenet200' in dataset_name else 224
input_size = input_size or default_input_size
transform_train = dataset.transform_train(input_size)
transform_test = dataset.transform_val(input_size)
if dataset_name in ('MiniImagenet'):
default_input_size = 84
input_size = input_size or default_input_size
transform_train = dataset.transform_train(input_size)
transform_test = dataset.transform_val(input_size)
if dataset_name in ('MiniPlaces', 'AnimalsWithAttributes2'):
transform_train = dataset.transform_train()
transform_test = dataset.transform_test()
return transform_train, transform_test
class Colors:
RED = '\x1b[31m'
GREEN = '\x1b[32m'
ENDC = '\033[0m'
BOLD = '\033[1m'
CYAN = '\x1b[36m'
@classmethod
def red(cls, *args):
print(cls.RED + args[0], *args[1:], cls.ENDC)
@classmethod
def green(cls, *args):
print(cls.GREEN + args[0], *args[1:], cls.ENDC)
@classmethod
def cyan(cls, *args):
print(cls.CYAN + args[0], *args[1:], cls.ENDC)
@classmethod
def bold(cls, *args):
print(cls.BOLD + args[0], *args[1:], cls.ENDC)
def get_mean_and_std(dataset):
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:,i,:,:].mean()
std[i] += inputs[:,i,:,:].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal(m.weight, mode='fan_out')
if m.bias:
init.constant(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant(m.weight, 1)
init.constant(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal(m.weight, std=1e-3)
if m.bias:
init.constant(m.bias, 0)
try:
_, term_width = os.popen('stty size', 'r').read().split()
term_width = int(term_width)
except Exception as e:
print(e)
term_width = 50
TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
global last_time, begin_time
if current == 0:
begin_time = time.time()
cur_len = int(TOTAL_BAR_LENGTH*current/total)
rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
sys.stdout.write(' [')
for i in range(cur_len):
sys.stdout.write('=')
sys.stdout.write('>')
for i in range(rest_len):
sys.stdout.write('.')
sys.stdout.write(']')
cur_time = time.time()
step_time = cur_time - last_time
last_time = cur_time
tot_time = cur_time - begin_time
L = []
L.append(' Step: %s' % format_time(step_time))
L.append(' | Tot: %s' % format_time(tot_time))
if msg:
L.append(' | ' + msg)
msg = ''.join(L)
sys.stdout.write(msg)
for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
sys.stdout.write(' ')
for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
sys.stdout.write('\b')
sys.stdout.write(' %d/%d ' % (current+1, total))
if current < total-1:
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
sys.stdout.flush()
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
def set_np_printoptions():
np.set_printoptions(formatter={'float': lambda x: "{0:0.3f}".format(x)})
def generate_fname(dataset, model, path_graph, wnid=None, name='',
trainset=None, include_labels=(), exclude_labels=(),
include_classes=(), num_samples=0, max_leaves_supervised=-1,
min_leaves_supervised=-1, tree_supervision_weight=0.5,
weighted_average=False, fine_tune=False,
loss='CrossEntropyLoss', word2vec=False, **kwargs):
fname = 'ckpt'
fname += '-' + dataset
fname += '-' + model
if name:
fname += '-' + name
if path_graph:
path = Path(path_graph)
fname += '-' + path.stem.replace('graph-', '', 1)
if include_labels:
labels = ",".join(map(str, include_labels))
fname += f'-incl{labels}'
if exclude_labels:
labels = ",".join(map(str, exclude_labels))
fname += f'-excl{labels}'
if include_classes:
labels = ",".join(map(str, include_classes))
fname += f'-incc{labels}'
if num_samples != 0 and num_samples is not None:
fname += f'-samples{num_samples}'
if loss != 'CrossEntropyLoss':
fname += f'-{loss}'
if max_leaves_supervised > 0:
fname += f'-mxls{max_leaves_supervised}'
if min_leaves_supervised > 0:
fname += f'-mnls{min_leaves_supervised}'
if tree_supervision_weight is not None and tree_supervision_weight != 1:
fname += f'-tsw{tree_supervision_weight}'
if weighted_average:
fname += '-weighted'
if word2vec:
fname += '-word2vec'
return fname
def get_saved_word2vec(path, dimension, projection_matrix):
word_vec = np.load(path)
word_vec = np.asarray(word_vec).reshape(1, dimension)
word_vec = np.matmul(word_vec, projection_matrix)[0]
return np.array(word_vec / LA.norm(word_vec), dtype=float)
def get_word_embedding(cls, trainset, dataset_name='CIFAR10'):
word2vec_path = os.path.join(os.path.join(trainset.root, DATASET_TO_FOLDER_NAME[dataset_name]), "word2vec/")
word_vec = np.load(word2vec_path + cls + '.npy')
return word_vec/LA.norm(word_vec)
def word2vec_model(net, trainset, dataset_name='CIFAR10', exclude_classes=None, pretrained=False):
    """Replace the network's final FC layer with frozen word2vec class embeddings.

    Loads one saved `.npy` vector per class in `trainset.classes`, L2-normalizes
    each, stacks them into a (num_classes, dim) weight matrix, and installs it
    as a frozen final linear layer (`module.fc` when `pretrained`, otherwise
    `module.linear`). `exclude_classes` is accepted for signature parity with
    `test_word2vec` but unused here.

    Raises:
        Exception: if the word2vec embedding folder does not exist.
    """
    print('==> Adding in word2vec embeddings...')
    # Unwrap DataParallel so we mutate the underlying module.
    module = net.module if isinstance(net, nn.DataParallel) else net
    word2vec_path = os.path.join(os.path.join('./data', DATASET_TO_FOLDER_NAME[dataset_name]), "word2vec/")
    if not os.path.exists(word2vec_path):
        raise Exception("No saved word2vec embeddings, run generate_word2vec.py")
    fc_weights = []
    for cls in trainset.classes:
        word_vec = np.load(word2vec_path + cls + '.npy')
        word_vec /= LA.norm(word_vec)
        fc_weights = np.append(fc_weights, word_vec)
    # nn.Linear stores weight as (out_features, in_features) = (classes, dim).
    fc_weights = fc_weights.reshape((len(trainset.classes), int(fc_weights.shape[0] / len(trainset.classes))))
    layer = nn.Linear(fc_weights.shape[1], len(trainset.classes)).to("cuda")
    layer.weight = nn.Parameter(torch.from_numpy(fc_weights).float().to("cuda"))
    layer.weight.requires_grad = False
    layer.bias.requires_grad = False
    # Bug fix: the original built this layer but never attached it to the
    # network, so the word2vec weights were silently discarded. Install it in
    # place of the existing final layer.
    if pretrained:
        module.fc = layer
    else:
        module.linear = layer
    Colors.cyan("Freezing FC weights..")
    return net
def test_word2vec(net, trainset, dataset_name='CIFAR10', exclude_classes=None, dimension=300):
    """Sanity-check that rows of the final FC layer still equal the saved
    word2vec vectors (i.e. the frozen rows were not modified by training).

    Checks `exclude_classes` when given, otherwise all of `trainset.classes`.
    Raises if the word2vec embedding folder is missing.
    """
    word2vec_path = os.path.join(os.path.join('./data', DATASET_TO_FOLDER_NAME[dataset_name]), "word2vec/")
    if not os.path.exists(word2vec_path):
        raise Exception("No saved word2vec embeddings, run generate_word2vec.py")
    net.eval()
    fc_weights = net.module.linear.weight.detach().cpu().numpy()
    class_names = exclude_classes if exclude_classes else trainset.classes
    for row_idx, cls in enumerate(class_names):
        word_vec = np.load(word2vec_path + cls + '.npy')
        assert all(fc_weights[row_idx] == word_vec)
    Colors.cyan("Freezing certain FC rows check passed!")
def normalize_weights(net, pretrained=True):
    """Mean-center and L2-normalize each row of the network's final FC layer
    in place, then reinstall the weights as a frozen parameter on "cuda".
    """
    net.eval()
    layer = net.module.fc if pretrained else net.module.linear
    weights = layer.weight.detach().cpu().numpy()
    for row in range(len(weights)):
        weights[row] = weights[row] - np.mean(weights[row])
        weights[row] = weights[row] / LA.norm(weights[row])
    layer.weight = nn.Parameter(torch.from_numpy(weights).float().to("cuda"))
    layer.weight.requires_grad = False
    return net
class LabelSmoothingLoss(nn.Module):
    """Cross-entropy against smoothed one-hot targets.

    If ``seen_to_zsl_cls`` maps seen class names to zero-shot class names, the
    smoothing mass of a mapped seen class is moved onto its zero-shot class;
    otherwise targets are plain one-hot rows scaled by the confidence.
    """

    def __init__(self, classes, smoothing=0.0, dim=-1, seen_to_zsl_cls=None):
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing  # mass kept on the true class
        self.smoothing = smoothing
        self.cls = classes  # ordered sequence of class names
        self.dim = dim
        # Fix: the original used a mutable default argument ({}); `None` is
        # normalized to an empty dict with identical behavior.
        self.seen_to_zsl_cls = seen_to_zsl_cls or {}

    def smooth_one_hot(self, labels):
        """Return soft targets of shape (batch, num_classes) for int `labels`."""
        assert 0 <= self.smoothing < 1
        num_classes = len(self.cls)
        label_shape = torch.Size((labels.size(0), num_classes))
        confidence = 1.0 - self.smoothing
        if self.smoothing == 0 or not self.seen_to_zsl_cls:
            # Fix: torch.zeros_like() requires a tensor argument; the original
            # passed a torch.Size here and raised a TypeError on this path.
            return torch.zeros(label_shape, device=labels.device).scatter_(
                1, labels.data.unsqueeze(1), confidence)
        with torch.no_grad():
            true_dist = torch.zeros(size=label_shape, device=labels.device)
            true_dist.scatter_(1, labels.data.unsqueeze(1), 1)
            for seen, zsl in self.seen_to_zsl_cls.items():
                zsl_idx, seen_idx = self.cls.index(zsl), self.cls.index(seen)
                seen_selector = torch.zeros_like(labels.data.unsqueeze(1))
                seen_selector[true_dist[:, seen_idx] == 1] = seen_idx
                zsl_selector = torch.zeros_like(labels.data.unsqueeze(1))
                zsl_selector[true_dist[:, seen_idx] == 1] = zsl_idx
                # NOTE(review): rows whose label is NOT `seen` keep selector 0,
                # so these scatters also write column 0 for those rows --
                # preserved as-is, but verify when class index 0 is in use.
                true_dist.scatter_(1, seen_selector, confidence)
                true_dist.scatter_(1, zsl_selector, self.smoothing)
        return true_dist

    def forward(self, pred, target):
        """Mean over the batch of -sum(soft_target * log_softmax(pred))."""
        pred = pred.log_softmax(dim=self.dim)
        with torch.no_grad():
            soft_label = self.smooth_one_hot(target)
        return torch.mean(torch.sum(-soft_label * pred, dim=self.dim))
class MaskLoss(nn.Module):
    """Dice-style overlap loss between predicted and target mask rows.

    Per sample: 2 * <min(inp, tar), gate> / sum(inp + target), where `gate`
    is a steep sigmoid selecting positions near the row's peak target value.
    NaN contributions are zeroed before averaging.
    """

    def __init__(self, size_average=None, reduce=None, reduction='mean'):
        # `size_average`/`reduce` mirror torch loss signatures but are unused.
        super(MaskLoss, self).__init__()
        self.reduction = reduction

    def forward(self, input, target):
        N, W = input.size()
        # Removed dead code: the original also computed batch-level tensors
        # `A` and `B` and a per-sample `idx`, none of which were ever used.
        # NOTE(review): this max runs over dim 0 (across the batch), giving a
        # length-W vector that is then indexed by the sample index `n`; that
        # only lines up when N <= W -- `torch.max(target, 1)` may have been
        # intended. Preserved as-is to keep behavior unchanged.
        values, _ = torch.max(target, 0)
        per_sample = []
        for n in range(N):
            value = values[n]
            tar = target[n]
            inp = input[n]
            overlap = torch.min(inp, tar)
            gate = 1 / (1 + torch.exp(-100 * (tar - .55 * value)))
            # NOTE(review): the denominator uses the full `target` (broadcast
            # over the batch), not the row `tar`; preserved as-is.
            per_sample.append(2 * torch.div(torch.dot(overlap, gate),
                                            torch.sum(inp + target, axis=-1)))
        per_sample = torch.stack(per_sample)
        per_sample[torch.isnan(per_sample)] = 0.0
        return per_sample.mean()
def replicate(inputs, labels):
    """Expand multi-label samples into one (sample, label) pair per positive.

    For each sample, every index where its label row equals 1 produces one
    copy of the sample paired with that index. Returns (None, None) when
    `inputs` is empty.
    """
    extra_dims = len(inputs.shape) - 1
    gathered_inputs, gathered_labels = [], []
    for sample, label in zip(inputs, labels):
        positives = torch.where(label == 1.)[0]
        gathered_labels.append(positives)
        gathered_inputs.append(
            sample.unsqueeze(0).repeat(len(positives), *([1] * extra_dims)))
    if not gathered_inputs:
        return None, None
    return torch.cat(gathered_inputs), torch.cat(gathered_labels)
def replicate_outputs(inputs, num_replicate):
    """Return a dict mapping each index 0..num_replicate-1 to an independent
    clone of `inputs` (clones share no storage with the original tensor).
    """
    return {idx: inputs.clone() for idx in range(num_replicate)}
f73b18b3bd5549421d5a7af3c2033291279afaa8 | 399 | py | Python | backend/dan3103_1_1/wsgi.py | crowdbotics-apps/dan3103-1-1 | c0adeab7e87fe0260ba69f4ac3ee144ee4b313d1 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/dan3103_1_1/wsgi.py | crowdbotics-apps/dan3103-1-1 | c0adeab7e87fe0260ba69f4ac3ee144ee4b313d1 | [
"FTL",
"AML",
"RSA-MD"
] | 28 | 2021-07-25T14:51:20.000Z | 2022-01-16T13:10:18.000Z | backend/dan3103_1_1/wsgi.py | crowdbotics-apps/dan3103-1-1 | c0adeab7e87fe0260ba69f4ac3ee144ee4b313d1 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
WSGI config for dan3103_1_1 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dan3103_1_1.settings')
application = get_wsgi_application()
| 23.470588 | 78 | 0.789474 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'dan3103_1_1.settings')
application = get_wsgi_application()
| true | true |
f73b1ab635b1b697f3160cf1c7294e15be8c4af9 | 937 | py | Python | python/helpers/pydev/pydev_tests_python/resources/_debugger_case_unhandled_exceptions.py | tgodzik/intellij-community | f5ef4191fc30b69db945633951fb160c1cfb7b6f | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/helpers/pydev/pydev_tests_python/resources/_debugger_case_unhandled_exceptions.py | tgodzik/intellij-community | f5ef4191fc30b69db945633951fb160c1cfb7b6f | [
"Apache-2.0"
] | 2 | 2022-02-19T09:45:05.000Z | 2022-02-27T20:32:55.000Z | python/helpers/pydev/pydev_tests_python/resources/_debugger_case_unhandled_exceptions.py | tgodzik/intellij-community | f5ef4191fc30b69db945633951fb160c1cfb7b6f | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | import threading, atexit, sys
try:
from thread import start_new_thread
except:
from _thread import start_new_thread
def _atexit():
print('TEST SUCEEDED')
sys.stderr.write('TEST SUCEEDED\n')
sys.stderr.flush()
sys.stdout.flush()
# Register the TEST SUCEEDED msg to the exit of the process.
atexit.register(_atexit)
def thread_func(n):
raise Exception('in thread 1')
th = threading.Thread(target=lambda: thread_func(1))
th.setDaemon(True)
th.start()
event = threading.Event()
def thread_func2():
event.set()
raise ValueError('in thread 2')
start_new_thread(thread_func2, ())
event.wait()
th.join()
# This is a bit tricky: although we waited on the event, there's a slight chance
# that we didn't get the notification because the thread could've stopped executing,
# so, sleep a bit so that the test does not become flaky.
import time
time.sleep(.3)
raise IndexError('in main')
| 19.520833 | 84 | 0.715048 | import threading, atexit, sys
try:
from thread import start_new_thread
except:
from _thread import start_new_thread
def _atexit():
print('TEST SUCEEDED')
sys.stderr.write('TEST SUCEEDED\n')
sys.stderr.flush()
sys.stdout.flush()
atexit.register(_atexit)
def thread_func(n):
raise Exception('in thread 1')
th = threading.Thread(target=lambda: thread_func(1))
th.setDaemon(True)
th.start()
event = threading.Event()
def thread_func2():
event.set()
raise ValueError('in thread 2')
start_new_thread(thread_func2, ())
event.wait()
th.join()
# that we didn't get the notification because the thread could've stopped executing,
# so, sleep a bit so that the test does not become flaky.
import time
time.sleep(.3)
raise IndexError('in main')
| true | true |
f73b1adda646a564d017692a2c08b6df7032046b | 89 | py | Python | weconnect_cli/__main__.py | tillsteinbach/WeConnect-cli | 28b841ac1bedd76db7e81ae7a15e2310165a6061 | [
"MIT"
] | 25 | 2021-06-02T21:16:15.000Z | 2022-03-17T06:55:45.000Z | weconnect_cli/__main__.py | tillsteinbach/WeConnect-cli | 28b841ac1bedd76db7e81ae7a15e2310165a6061 | [
"MIT"
] | 40 | 2021-06-09T19:49:56.000Z | 2022-03-23T15:03:19.000Z | weconnect_cli/__main__.py | tillsteinbach/WeConnect-cli | 28b841ac1bedd76db7e81ae7a15e2310165a6061 | [
"MIT"
] | 1 | 2022-01-20T08:31:02.000Z | 2022-01-20T08:31:02.000Z | from weconnect_cli.weconnect_cli_base import main
if __name__ == '__main__':
main()
| 17.8 | 49 | 0.752809 | from weconnect_cli.weconnect_cli_base import main
if __name__ == '__main__':
main()
| true | true |
f73b1b6c3c56793177639affa6e8e0a5a0c4266e | 339 | py | Python | ex1.py | eruffaldi/pypnunum | a9d98ad8d5bdc77c75c131e67a4577c8a8107097 | [
"Apache-2.0"
] | 1 | 2017-12-01T23:33:12.000Z | 2017-12-01T23:33:12.000Z | ex1.py | eruffaldi/pypnunum | a9d98ad8d5bdc77c75c131e67a4577c8a8107097 | [
"Apache-2.0"
] | null | null | null | ex1.py | eruffaldi/pypnunum | a9d98ad8d5bdc77c75c131e67a4577c8a8107097 | [
"Apache-2.0"
] | 1 | 2018-02-01T14:30:27.000Z | 2018-02-01T14:30:27.000Z | # from Slide 21 "Divide by 0" slide
# The SORN has 10 presence bits set to represent the half-open interval (–1, 2]. Begin by taking the reciprocal, which is lossless and preserves the contiguity of the unums in the result.
from punum import *
a = Alphabet.p2()
x = Pbound((-a.one()).next(),a.one().next().next())
print (x)
print (~x) | 37.666667 | 188 | 0.696165 |
from punum import *
a = Alphabet.p2()
x = Pbound((-a.one()).next(),a.one().next().next())
print (x)
print (~x) | true | true |
f73b1b6d8a7c8a23a3b96b2886505bd7595dcd5b | 1,634 | py | Python | list_painting.py | EtlamGit/PySlicer | 4f2015ca995377d34dd176045396dcf51bc78a2a | [
"MIT"
] | 1 | 2021-02-16T16:27:54.000Z | 2021-02-16T16:27:54.000Z | list_painting.py | EtlamGit/PySlicer | 4f2015ca995377d34dd176045396dcf51bc78a2a | [
"MIT"
] | null | null | null | list_painting.py | EtlamGit/PySlicer | 4f2015ca995377d34dd176045396dcf51bc78a2a | [
"MIT"
] | null | null | null | # copyright 2019 Mojang (Microsoft Corporation), Python translation by EtlamGit
from gridSprite import gridSprite
def painting(path, x, y, w, h):
    # Build the sprite for one painting texture: `path` is the texture name
    # (without extension), (x, y) its grid cell, (w, h) its size in cells.
    # The trailing (0, 0, 16, 16) args are passed straight to gridSprite --
    # presumably pixel offset and 16x16 cell dimensions; see gridSprite.
    return gridSprite('assets/minecraft/textures/painting/' + path + ".png", x, y, w, h, 0, 0, 16, 16)
painting_input = 'assets/minecraft/textures/painting/paintings_kristoffer_zetterstrand.png'
painting_list = {
painting("back", 15, 0, 1, 1),
painting("kebab", 0, 0, 1, 1),
painting("aztec", 1, 0, 1, 1),
painting("alban", 2, 0, 1, 1),
painting("aztec2", 3, 0, 1, 1),
painting("bomb", 4, 0, 1, 1),
painting("plant", 5, 0, 1, 1),
painting("wasteland", 6, 0, 1, 1),
painting("pool", 0, 2, 2, 1),
painting("courbet", 2, 2, 2, 1),
painting("sea", 4, 2, 2, 1),
painting("sunset", 6, 2, 2, 1),
painting("creebet", 8, 2, 2, 1),
painting("wanderer", 0, 4, 1, 2),
painting("graham", 1, 4, 1, 2),
painting("match", 0, 8, 2, 2),
painting("bust", 2, 8, 2, 2),
painting("stage", 4, 8, 2, 2),
painting("void", 6, 8, 2, 2),
painting("skull_and_roses", 8, 8, 2, 2),
painting("wither", 10, 8, 2, 2),
painting("fighters", 0, 6, 4, 2),
painting("pointer", 0, 12, 4, 4),
painting("pigscene", 4, 12, 4, 4),
painting("burning_skull", 8, 12, 4, 4),
painting("skeleton", 12, 4, 4, 3),
painting("donkey_kong", 12, 7, 4, 3)
}
| 40.85 | 102 | 0.47858 |
from gridSprite import gridSprite
def painting(path, x, y, w, h):
return gridSprite('assets/minecraft/textures/painting/' + path + ".png", x, y, w, h, 0, 0, 16, 16)
painting_input = 'assets/minecraft/textures/painting/paintings_kristoffer_zetterstrand.png'
painting_list = {
painting("back", 15, 0, 1, 1),
painting("kebab", 0, 0, 1, 1),
painting("aztec", 1, 0, 1, 1),
painting("alban", 2, 0, 1, 1),
painting("aztec2", 3, 0, 1, 1),
painting("bomb", 4, 0, 1, 1),
painting("plant", 5, 0, 1, 1),
painting("wasteland", 6, 0, 1, 1),
painting("pool", 0, 2, 2, 1),
painting("courbet", 2, 2, 2, 1),
painting("sea", 4, 2, 2, 1),
painting("sunset", 6, 2, 2, 1),
painting("creebet", 8, 2, 2, 1),
painting("wanderer", 0, 4, 1, 2),
painting("graham", 1, 4, 1, 2),
painting("match", 0, 8, 2, 2),
painting("bust", 2, 8, 2, 2),
painting("stage", 4, 8, 2, 2),
painting("void", 6, 8, 2, 2),
painting("skull_and_roses", 8, 8, 2, 2),
painting("wither", 10, 8, 2, 2),
painting("fighters", 0, 6, 4, 2),
painting("pointer", 0, 12, 4, 4),
painting("pigscene", 4, 12, 4, 4),
painting("burning_skull", 8, 12, 4, 4),
painting("skeleton", 12, 4, 4, 3),
painting("donkey_kong", 12, 7, 4, 3)
}
| true | true |
f73b1b70433a231ed61600e509068803f6852beb | 2,591 | py | Python | readSample.py | DiegoCao/GraphSage | 55d9e41695a17e0de5fac091c35b19d43a82888d | [
"MIT"
] | null | null | null | readSample.py | DiegoCao/GraphSage | 55d9e41695a17e0de5fac091c35b19d43a82888d | [
"MIT"
] | null | null | null | readSample.py | DiegoCao/GraphSage | 55d9e41695a17e0de5fac091c35b19d43a82888d | [
"MIT"
] | null | null | null | import json as json
import numpy as np
import networkx as nx
from networkx.readwrite import json_graph
# with open('./example_data/toy-ppi-feats.npy') as load_f:
def t1():
    # Exploratory/debug routine: dump the structure of the toy-PPI GraphSAGE
    # JSON and sanity-check the consistency of its train/val/test split flags
    # against the 'train_removed'/'test_removed' link annotations.
    with open('./example_data/toy-ppi-G.json') as f:
        data = json.load(f)
    for i in data:
        print(i)
    print(data['directed'])
    print(data['graph'])
    print(data['multigraph'])
    # print(data['nodes'])
    print(type(data['graph']))
    print(type(data['links']))
    print(type(data['nodes']))
    cnt = 0
    itr = 0
    testcnt = 0
    train_set = set()
    val_set = set()
    test_set = set()
    # Partition node ids into train/val/test by their boolean flags.
    for i in data['nodes']:
        mid = i['id']
        if itr == 0:
            print(i)
            itr +=1
        if i['val'] == True:
            cnt += 1
            val_set.add(mid)
        elif i['test'] == True:
            testcnt += 1
            test_set.add(mid)
        else:
            train_set.add(mid)
        if i['val'] == True and i['test']== True:
            print('wtf?')
    itr = 0
    for link in data['links']:
        if itr == 0:
            # NOTE(review): this prints the leftover node `i` from the
            # previous loop -- probably `link` was intended.
            print(i)
            itr +=1
        print(link)
        print(type(link))
        if link['train_removed'] == True:
            # print('wtf')
            target = link['target']
            source = link['source']
            if (target not in val_set or source not in val_set) and link['test_removed'] == False:
                print('damn!!')
            pass
        if link['test_removed'] == True:
            target = link['target']
            source = link['source']
            assert( (target in test_set) and (source in test_set))
    # print(data['links'][3])
    # val_cnt = cnt
    # train_cnt = len(data['nodes']) - cnt - testcnt
    # print('the test cnt', testcnt)
    # print('the val cnt', val_cnt)
    # print('the total ', len(data['nodes']))
    # print('the train ', train_cnt)
    # print('the train/total', train_cnt/len(data['nodes']))
    # print(cnt)
    # print(len(data['nodes'])- cnt)
# res = np.load('./unsup_example_data/graphsage_mean_small_0.000010/val.npy')
# # print(res[0])
# print(len(res))
# feats = np.load('./example_data/toy-ppi-feats.npy')
# print(type(feats))
# print(type(feats[0]))
# print(feats[0])
def t2():
    """Load the node-link graph dump at ./fljson/sto-G.json and print node 0's
    'val' flag followed by every edge. (Debug helper with hard-coded path.)
    """
    with open('./fljson/sto-G.json', 'r') as fp:
        # Renamed from `file` (poor/shadow-prone name); also removed an unused
        # `itr` local and a commented-out dead loop.
        graph_data = json.load(fp)
    G = json_graph.node_link_graph(graph_data)
    print(G.nodes[0]['val'])
    for edge in G.edges():
        print(edge)
if __name__ == "__main__":
t2() | 22.929204 | 98 | 0.52335 | import json as json
import numpy as np
import networkx as nx
from networkx.readwrite import json_graph
def t1():
with open('./example_data/toy-ppi-G.json') as f:
data = json.load(f)
for i in data:
print(i)
print(data['directed'])
print(data['graph'])
print(data['multigraph'])
print(type(data['graph']))
print(type(data['links']))
print(type(data['nodes']))
cnt = 0
itr = 0
testcnt = 0
train_set = set()
val_set = set()
test_set = set()
for i in data['nodes']:
mid = i['id']
if itr == 0:
print(i)
itr +=1
if i['val'] == True:
cnt += 1
val_set.add(mid)
elif i['test'] == True:
testcnt += 1
test_set.add(mid)
else:
train_set.add(mid)
if i['val'] == True and i['test']== True:
print('wtf?')
itr = 0
for link in data['links']:
if itr == 0:
print(i)
itr +=1
print(link)
print(type(link))
if link['train_removed'] == True:
target = link['target']
source = link['source']
if (target not in val_set or source not in val_set) and link['test_removed'] == False:
print('damn!!')
pass
if link['test_removed'] == True:
target = link['target']
source = link['source']
assert( (target in test_set) and (source in test_set))
2():
with open('./fljson/sto-G.json', 'r') as fp:
file = json.load(fp)
itr = 0
G = json_graph.node_link_graph(file)
print(G.nodes[0]['val'])
for edge in G.edges():
print(edge)
if __name__ == "__main__":
t2() | true | true |
f73b1dac7b0f21573292a25ba3fc727d67fbe221 | 1,084 | py | Python | c7n/resources/directconnect.py | ticketmaster/cloud-custodian | 0da3866f70f858895af228cc08706d0909a2a324 | [
"Apache-2.0"
] | null | null | null | c7n/resources/directconnect.py | ticketmaster/cloud-custodian | 0da3866f70f858895af228cc08706d0909a2a324 | [
"Apache-2.0"
] | 4 | 2017-02-02T17:08:23.000Z | 2017-05-25T19:33:19.000Z | c7n/resources/directconnect.py | ticketmaster/cloud-custodian | 0da3866f70f858895af228cc08706d0909a2a324 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from c7n.manager import resources
from c7n.query import QueryResourceManager
@resources.register('directconnect')
class DirectConnect(QueryResourceManager):
    """Cloud Custodian resource manager for AWS Direct Connect connections."""
    class resource_type(object):
        # Metadata telling QueryResourceManager how to enumerate and identify
        # the resource via the boto3 'directconnect' client.
        service = 'directconnect'
        enum_spec = ('describe_connections', 'connections', None)  # (op, result key, extra args)
        id = 'connectionId'
        name = 'connectionName'
        filter_name = 'connectionId'
        dimension = None  # no CloudWatch metrics dimension for this resource
| 36.133333 | 82 | 0.750923 |
from __future__ import absolute_import, division, print_function, unicode_literals
from c7n.manager import resources
from c7n.query import QueryResourceManager
@resources.register('directconnect')
class DirectConnect(QueryResourceManager):
class resource_type(object):
service = 'directconnect'
enum_spec = ('describe_connections', 'connections', None)
id = 'connectionId'
name = 'connectionName'
filter_name = 'connectionId'
dimension = None
| true | true |
f73b1e259bec1d20d80014c7ec153ef4e694f241 | 4,730 | py | Python | tensorflow_federated/tools/development/setup.py | VonRosenchild/federated | ad3986f8587a0f1dd0c6ce738db1fef436cb826f | [
"Apache-2.0"
] | 1 | 2019-10-10T06:19:52.000Z | 2019-10-10T06:19:52.000Z | tensorflow_federated/tools/development/setup.py | wangcaihua/federated | c8c7fe84d20f6c16a2a9f290a05179b5422257b6 | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/tools/development/setup.py | wangcaihua/federated | c8c7fe84d20f6c16a2a9f290a05179b5422257b6 | [
"Apache-2.0"
] | 2 | 2019-10-10T06:19:41.000Z | 2021-01-28T03:06:55.000Z | # Lint as: python3
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Federated is an open-source federated learning framework.
TensorFlow Federated (TFF) is an open-source framework for machine learning and
other computations on decentralized data. TFF has been developed to facilitate
open research and experimentation with Federated Learning (FL), an approach to
machine learning where a shared global model is trained across many
participating clients that keep their training data locally. For example, FL has
been used to train prediction models for mobile keyboards without uploading
sensitive typing data to servers.
TFF enables developers to use the included federated learning algorithms with
their models and data, as well as to experiment with novel algorithms. The
building blocks provided by TFF can also be used to implement non-learning
computations, such as aggregated analytics over decentralized data.
TFF's interfaces are organized in two layers:
* Federated Learning (FL) API
The `tff.learning` layer offers a set of high-level interfaces that allow
developers to apply the included implementations of federated training and
evaluation to their existing TensorFlow models.
* Federated Core (FC) API
At the core of the system is a set of lower-level interfaces for concisely
expressing novel federated algorithms by combining TensorFlow with distributed
communication operators within a strongly-typed functional programming
environment. This layer also serves as the foundation upon which we've built
`tff.learning`.
TFF enables developers to declaratively express federated computations, so they
could be deployed to diverse runtime environments. Included with TFF is a
single-machine simulation runtime for experiments. Please visit the
tutorials and try it out yourself!
"""
# TODO(b/124800187): Keep in sync with the contents of README.
import sys
import setuptools
DOCLINES = __doc__.split('\n')
project_name = 'tensorflow_federated'
if '--project_name' in sys.argv:
project_name_idx = sys.argv.index('--project_name')
project_name = sys.argv[project_name_idx + 1]
sys.argv.remove('--project_name')
sys.argv.pop(project_name_idx)
with open('tensorflow_federated/version.py') as fp:
globals_dict = {}
exec(fp.read(), globals_dict) # pylint: disable=exec-used
VERSION = globals_dict['__version__']
REQUIRED_PACKAGES = [
'absl-py~=0.7',
'attrs~=18.2',
'cachetools~=3.1.1',
'enum34~=1.1',
'grpcio~=1.22.0',
'h5py~=2.6',
'numpy~=1.14',
'portpicker~=1.3.1',
'six~=1.10',
'tensorflow-model-optimization~=0.1.3',
'tensorflow-privacy~=0.1.0',
'tf-nightly',
'tfa-nightly',
]
setuptools.setup(
name=project_name,
version=VERSION,
packages=setuptools.find_packages(exclude=('tools')),
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
long_description_content_type='text/plain',
author='Google Inc.',
author_email='packages@tensorflow.org',
url='http://tensorflow.org/federated',
download_url='https://github.com/tensorflow/federated/tags',
install_requires=REQUIRED_PACKAGES,
# PyPI package information.
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
),
license='Apache 2.0',
keywords='tensorflow federated machine learning',
)
| 38.770492 | 80 | 0.72685 |
import sys
import setuptools
DOCLINES = __doc__.split('\n')
project_name = 'tensorflow_federated'
if '--project_name' in sys.argv:
project_name_idx = sys.argv.index('--project_name')
project_name = sys.argv[project_name_idx + 1]
sys.argv.remove('--project_name')
sys.argv.pop(project_name_idx)
with open('tensorflow_federated/version.py') as fp:
globals_dict = {}
exec(fp.read(), globals_dict)
VERSION = globals_dict['__version__']
REQUIRED_PACKAGES = [
'absl-py~=0.7',
'attrs~=18.2',
'cachetools~=3.1.1',
'enum34~=1.1',
'grpcio~=1.22.0',
'h5py~=2.6',
'numpy~=1.14',
'portpicker~=1.3.1',
'six~=1.10',
'tensorflow-model-optimization~=0.1.3',
'tensorflow-privacy~=0.1.0',
'tf-nightly',
'tfa-nightly',
]
setuptools.setup(
name=project_name,
version=VERSION,
packages=setuptools.find_packages(exclude=('tools')),
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
long_description_content_type='text/plain',
author='Google Inc.',
author_email='packages@tensorflow.org',
url='http://tensorflow.org/federated',
download_url='https://github.com/tensorflow/federated/tags',
install_requires=REQUIRED_PACKAGES,
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
),
license='Apache 2.0',
keywords='tensorflow federated machine learning',
)
| true | true |
f73b1fd0dff6c5078536096345d9bbb124ff8787 | 106 | py | Python | ml_api/standardizer/apps.py | codeamazone/ml_api | c93a4165341ac8fc60e35072d3874fa7c5808004 | [
"MIT"
] | null | null | null | ml_api/standardizer/apps.py | codeamazone/ml_api | c93a4165341ac8fc60e35072d3874fa7c5808004 | [
"MIT"
] | null | null | null | ml_api/standardizer/apps.py | codeamazone/ml_api | c93a4165341ac8fc60e35072d3874fa7c5808004 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class StandardizerConfig(AppConfig):
name = 'ml_api.standardizer'
| 17.666667 | 36 | 0.783019 | from django.apps import AppConfig
class StandardizerConfig(AppConfig):
name = 'ml_api.standardizer'
| true | true |
f73b200ee856d7a7b7583f69a1d86b9fdab46ec6 | 649 | py | Python | dbl/__init__.py | TAG-Epic/DBL-Python-Library | c81af619fafc3a065e4d0bcfcf12c198e784308c | [
"MIT"
] | null | null | null | dbl/__init__.py | TAG-Epic/DBL-Python-Library | c81af619fafc3a065e4d0bcfcf12c198e784308c | [
"MIT"
] | null | null | null | dbl/__init__.py | TAG-Epic/DBL-Python-Library | c81af619fafc3a065e4d0bcfcf12c198e784308c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
DBL Python API Wrapper
~~~~~~~~~~~~~~~~~~~~~~
A basic wrapper for the top.gg API.
:copyright: (c) 2020 Assanali Mukhanov & top.gg
:license: MIT, see LICENSE for more details.
"""
__title__ = 'dblpy'
__author__ = 'Francis Taylor'
__license__ = 'MIT'
__copyright__ = 'Copyright 2020 Assanali Mukhanov'
__version__ = '0.3.4'
from collections import namedtuple
from .client import DBLClient
from .errors import *
from .http import HTTPClient
VersionInfo = namedtuple('VersionInfo', 'major minor micro releaselevel serial')
version_info = VersionInfo(major = 0, minor = 3, micro = 4, releaselevel = 'final', serial = 0)
| 24.961538 | 95 | 0.705701 |
__title__ = 'dblpy'
__author__ = 'Francis Taylor'
__license__ = 'MIT'
__copyright__ = 'Copyright 2020 Assanali Mukhanov'
__version__ = '0.3.4'
from collections import namedtuple
from .client import DBLClient
from .errors import *
from .http import HTTPClient
VersionInfo = namedtuple('VersionInfo', 'major minor micro releaselevel serial')
version_info = VersionInfo(major = 0, minor = 3, micro = 4, releaselevel = 'final', serial = 0)
| true | true |
f73b208a223485b598b88acf029f28f42be6cdf5 | 591 | py | Python | life/users/api/serializers/lsg.py | coronasafe/life_backend | 00eacfdc5cd544dc136fb306340fb0d56afa78ff | [
"MIT"
] | 1 | 2021-05-15T20:40:00.000Z | 2021-05-15T20:40:00.000Z | life/users/api/serializers/lsg.py | coronasafe/life_backend | 00eacfdc5cd544dc136fb306340fb0d56afa78ff | [
"MIT"
] | null | null | null | life/users/api/serializers/lsg.py | coronasafe/life_backend | 00eacfdc5cd544dc136fb306340fb0d56afa78ff | [
"MIT"
] | 2 | 2021-05-08T01:45:42.000Z | 2021-05-17T02:00:10.000Z | from rest_framework import serializers
from life.users.models import District, LocalBody, State, Ward
class StateSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the State model."""
    class Meta:
        model = State
        fields = "__all__"
class DistrictSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the District model."""
    class Meta:
        model = District
        fields = "__all__"
class LocalBodySerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the LocalBody model."""
    class Meta:
        model = LocalBody
        fields = "__all__"
class WardSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the Ward model."""
    class Meta:
        model = Ward
        fields = "__all__"
| 21.107143 | 62 | 0.693739 | from rest_framework import serializers
from life.users.models import District, LocalBody, State, Ward
class StateSerializer(serializers.ModelSerializer):
class Meta:
model = State
fields = "__all__"
class DistrictSerializer(serializers.ModelSerializer):
class Meta:
model = District
fields = "__all__"
class LocalBodySerializer(serializers.ModelSerializer):
class Meta:
model = LocalBody
fields = "__all__"
class WardSerializer(serializers.ModelSerializer):
class Meta:
model = Ward
fields = "__all__"
| true | true |
f73b21df9146bd2a60817565790da9653cc85c65 | 2,579 | py | Python | corehq/form_processor/tasks.py | satyaakam/commcare-hq | 233f255ff20ab3a16013e9fdfdb9c1dcf632e415 | [
"BSD-3-Clause"
] | null | null | null | corehq/form_processor/tasks.py | satyaakam/commcare-hq | 233f255ff20ab3a16013e9fdfdb9c1dcf632e415 | [
"BSD-3-Clause"
] | 1 | 2021-06-02T04:45:16.000Z | 2021-06-02T04:45:16.000Z | corehq/form_processor/tasks.py | satyaakam/commcare-hq | 233f255ff20ab3a16013e9fdfdb9c1dcf632e415 | [
"BSD-3-Clause"
] | null | null | null | import time
from datetime import timedelta
from celery.schedules import crontab
from celery.task import periodic_task
from django.conf import settings
from corehq.form_processor.reprocess import reprocess_unfinished_stub
from corehq.util.celery_utils import no_result_task
from corehq.util.decorators import serial_task
from corehq.util.metrics import metrics_counter, metrics_gauge
from couchforms.models import UnfinishedSubmissionStub
from dimagi.utils.couch import CriticalSection
from dimagi.utils.logging import notify_exception
SUBMISSION_REPROCESS_CELERY_QUEUE = 'submission_reprocessing_queue'
@no_result_task(serializer='pickle', queue=SUBMISSION_REPROCESS_CELERY_QUEUE, acks_late=True)
def reprocess_submission(submssion_stub_id):
    """Celery task: reprocess one unfinished form-submission stub by id.

    A CriticalSection lock keyed on the stub id prevents the same stub from
    being processed concurrently; a stub that no longer exists (already
    handled or deleted) is silently skipped.
    """
    # NOTE: parameter name carries a historical typo ('submssion'); kept
    # because callers pass it positionally/by this name.
    with CriticalSection(['reprocess_submission_%s' % submssion_stub_id]):
        try:
            stub = UnfinishedSubmissionStub.objects.get(id=submssion_stub_id)
        except UnfinishedSubmissionStub.DoesNotExist:
            # Already processed or removed -- nothing to do.
            return
        reprocess_unfinished_stub(stub)
        metrics_counter('commcare.submission_reprocessing.count')
@periodic_task(run_every=crontab(minute='*/5'), queue=settings.CELERY_PERIODIC_QUEUE)
def _reprocess_archive_stubs():
    """Every 5 minutes, enqueue the serialized archive-stub reprocessing task."""
    reprocess_archive_stubs.delay()
@serial_task("reprocess_archive_stubs", queue=settings.CELERY_PERIODIC_QUEUE)
def reprocess_archive_stubs():
    """Retry unfinished form archive/unarchive operations.

    Picks up stubs with fewer than 3 failed attempts and either re-runs the
    whole archive operation or only re-publishes the Kafka change feed entry,
    depending on how far the original attempt got. Bounded to ~4 minutes of
    work per invocation so the serial task stays short.
    """
    # Check for archive stubs
    from corehq.form_processor.interfaces.dbaccessors import FormAccessors
    from couchforms.models import UnfinishedArchiveStub
    stubs = UnfinishedArchiveStub.objects.filter(attempts__lt=3)
    metrics_gauge('commcare.unfinished_archive_stubs', len(stubs))
    start = time.time()
    cutoff = start + timedelta(minutes=4).total_seconds()
    for stub in stubs:
        # Exit this task after 4 minutes so that tasks remain short
        if time.time() > cutoff:
            return
        try:
            xform = FormAccessors(stub.domain).get_form(form_id=stub.xform_id)
            # If the history wasn't updated the first time around, run the whole thing again.
            if not stub.history_updated:
                FormAccessors.do_archive(xform, stub.archive, stub.user_id, trigger_signals=True)
            # If the history was updated the first time around, just send the update to kafka
            else:
                FormAccessors.publish_archive_action_to_kafka(xform, stub.user_id, stub.archive)
        except Exception:
            # Errors should not prevent processing other stubs
            notify_exception(None, "Error processing UnfinishedArchiveStub")
| 42.278689 | 97 | 0.759209 | import time
from datetime import timedelta
from celery.schedules import crontab
from celery.task import periodic_task
from django.conf import settings
from corehq.form_processor.reprocess import reprocess_unfinished_stub
from corehq.util.celery_utils import no_result_task
from corehq.util.decorators import serial_task
from corehq.util.metrics import metrics_counter, metrics_gauge
from couchforms.models import UnfinishedSubmissionStub
from dimagi.utils.couch import CriticalSection
from dimagi.utils.logging import notify_exception
SUBMISSION_REPROCESS_CELERY_QUEUE = 'submission_reprocessing_queue'
@no_result_task(serializer='pickle', queue=SUBMISSION_REPROCESS_CELERY_QUEUE, acks_late=True)
def reprocess_submission(submssion_stub_id):
with CriticalSection(['reprocess_submission_%s' % submssion_stub_id]):
try:
stub = UnfinishedSubmissionStub.objects.get(id=submssion_stub_id)
except UnfinishedSubmissionStub.DoesNotExist:
return
reprocess_unfinished_stub(stub)
metrics_counter('commcare.submission_reprocessing.count')
@periodic_task(run_every=crontab(minute='*/5'), queue=settings.CELERY_PERIODIC_QUEUE)
def _reprocess_archive_stubs():
reprocess_archive_stubs.delay()
@serial_task("reprocess_archive_stubs", queue=settings.CELERY_PERIODIC_QUEUE)
def reprocess_archive_stubs():
from corehq.form_processor.interfaces.dbaccessors import FormAccessors
from couchforms.models import UnfinishedArchiveStub
stubs = UnfinishedArchiveStub.objects.filter(attempts__lt=3)
metrics_gauge('commcare.unfinished_archive_stubs', len(stubs))
start = time.time()
cutoff = start + timedelta(minutes=4).total_seconds()
for stub in stubs:
if time.time() > cutoff:
return
try:
xform = FormAccessors(stub.domain).get_form(form_id=stub.xform_id)
if not stub.history_updated:
FormAccessors.do_archive(xform, stub.archive, stub.user_id, trigger_signals=True)
# If the history was updated the first time around, just send the update to kafka
else:
FormAccessors.publish_archive_action_to_kafka(xform, stub.user_id, stub.archive)
except Exception:
# Errors should not prevent processing other stubs
notify_exception(None, "Error processing UnfinishedArchiveStub")
| true | true |
f73b225668494337de64ccf00db5a6f15f513153 | 12,630 | py | Python | tests/test_dipdup/test_index.py | dipdup-net/dipdup-py | 1f42e3788d8eec1021e5002837471ce31dafe6ee | [
"MIT"
] | 39 | 2021-04-13T10:53:27.000Z | 2022-02-11T00:53:44.000Z | tests/test_dipdup/test_index.py | dipdup-net/dipdup-py | 1f42e3788d8eec1021e5002837471ce31dafe6ee | [
"MIT"
] | 113 | 2021-06-01T18:16:42.000Z | 2022-03-28T06:12:58.000Z | tests/test_dipdup/test_index.py | dipdup-net/dipdup-py | 1f42e3788d8eec1021e5002837471ce31dafe6ee | [
"MIT"
] | 16 | 2021-05-26T07:04:40.000Z | 2022-03-29T06:50:25.000Z | import datetime
from unittest import IsolatedAsyncioTestCase
from unittest.mock import AsyncMock
from dipdup.config import ContractConfig
from dipdup.config import OperationHandlerConfig
from dipdup.config import OperationHandlerTransactionPatternConfig
from dipdup.config import OperationIndexConfig
from dipdup.config import OperationType
from dipdup.config import TzktDatasourceConfig
from dipdup.index import OperationIndex
from dipdup.index import extract_operation_subgroups
from dipdup.models import OperationData
add_liquidity_operations = (
OperationData(
type='transaction',
id=76905130,
level=1676582,
timestamp=datetime.datetime(2021, 9, 8, 16, 2, 14, tzinfo=datetime.timezone.utc),
hash='opWVrmpgeuQ2tz65DcV5USnCFW7j7x97XQ2BzEAcmefPEjUfkMw',
counter=15811432,
sender_address='tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
target_address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
initiator_address=None,
amount=None,
status='applied',
has_internals=True,
storage={
'admin': 'KT1Kfu13FmNbcZSjTPZLrAUbEYNZim6vtg6d',
'lpFee': '400',
'paused': False,
'token1Id': '0',
'token2Id': '0',
'systemFee': '1000',
'token1_Fee': '0',
'token2_Fee': '0',
'token1Check': False,
'token1_pool': '470000000000000000000',
'token2Check': False,
'token2_pool': '16000000',
'totalSupply': '86717933554715',
'maxSwapLimit': '40',
'token1Address': 'KT1GRSvLoikDsXujKgZPsGLX8k8VvR2Tq95b',
'token2Address': 'KT1TwzD6zV3WeJ39ukuqxcfK2fJCnhvrdN1X',
'lpTokenAddress': 'KT1NLZah1MKeWuveQvdsCqAUCjksKw8J296z',
},
block=None,
sender_alias=None,
nonce=None,
target_alias='PLENTY / SMAK Swap',
initiator_alias=None,
entrypoint='AddLiquidity',
parameter_json={
'recipient': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
'token1_max': '470000000000000000000',
'token2_max': '16000000',
},
originated_contract_address=None,
originated_contract_alias=None,
originated_contract_type_hash=None,
originated_contract_code_hash=None,
),
OperationData(
type='transaction',
id=76905131,
level=1676582,
timestamp=datetime.datetime(2021, 9, 8, 16, 2, 14, tzinfo=datetime.timezone.utc),
hash='opWVrmpgeuQ2tz65DcV5USnCFW7j7x97XQ2BzEAcmefPEjUfkMw',
counter=15811432,
sender_address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
target_address='KT1GRSvLoikDsXujKgZPsGLX8k8VvR2Tq95b',
initiator_address='tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
amount=None,
status='applied',
has_internals=False,
storage={
'paused': False,
'balances': 3943,
'metadata': 3944,
'lastUpdate': '1676287',
'totalSupply': '14712639179877222051752285',
'administrator': 'KT1GpTEq4p2XZ8w9p5xM7Wayyw5VR7tb3UaW',
'token_metadata': 3945,
'tokensPerBlock': '50000000000000000000',
},
block=None,
sender_alias='PLENTY / SMAK Swap',
nonce=0,
target_alias='PLENTY',
initiator_alias=None,
entrypoint='transfer',
parameter_json={
'to': 'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
'from': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
'value': '470000000000000000000',
},
originated_contract_address=None,
originated_contract_alias=None,
originated_contract_type_hash=None,
originated_contract_code_hash=None,
diffs=(
{
'bigmap': 3943,
'path': 'balances',
'action': 'update_key',
'content': {
'hash': 'exprtkqafR3YBedPSHP6Lts8WVHn4jj853RfRZiTpzYNP8KKLaU12H',
'key': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
'value': {
'balance': '1141847967508578897233841',
'approvals': {
'KT19Dskaofi6ZTkrw3Tq4pK7fUqHqCz4pTZ3': '0',
'KT1AbuUaPQmYLsB8n8FdSzBrxvrsm8ctwW1V': '0',
'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU': '0',
'KT1HUnqM6xFJa51PM2xHfLs7s6ARvXungtyq': '0',
'KT1HZkD2T4uczgYkZ6fb9gm1fymeJoRuezLz': '9060000000000000000000',
'KT1NtsnKQ1c3rYB12ZToP77XaJs8WDBvF221': '0',
'KT1PuPNtDFLR6U7e7vDuxunDoKasVT6kMSkz': '0',
'KT1UNBvCJXiwJY6tmHM7CJUVwNPew53XkSfh': '0',
'KT1VeNQa4mucRj36qAJ9rTzm4DTJKfemVaZT': '0',
'KT1X1LgNkQShpF9nRLYw3Dgdy4qp38MX617z': '0',
'KT1XVrXmWY9AdVri6KpxKo4CWxizKajmgzMt': '0',
'KT1XXAavg3tTj12W1ADvd3EEnm1pu6XTmiEF': '550000000000000000',
'KT1XutoFJ9dXvWxT7ttG86N2tSTUEpatFVTm': '0',
},
},
},
},
{
'bigmap': 3943,
'path': 'balances',
'action': 'add_key',
'content': {
'hash': 'exprugvwjodjwqmGVVryY5uqz9fcg6BndukYj6bproCFShQ6nkuG8e',
'key': 'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
'value': {'balance': '470000000000000000000', 'approvals': {}},
},
},
),
),
OperationData(
type='transaction',
id=76905132,
level=1676582,
timestamp=datetime.datetime(2021, 9, 8, 16, 2, 14, tzinfo=datetime.timezone.utc),
hash='opWVrmpgeuQ2tz65DcV5USnCFW7j7x97XQ2BzEAcmefPEjUfkMw',
counter=15811432,
sender_address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
target_address='KT1TwzD6zV3WeJ39ukuqxcfK2fJCnhvrdN1X',
initiator_address='tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
amount=None,
status='applied',
has_internals=False,
storage={
'freezer': 'KT1TwzD6zV3WeJ39ukuqxcfK2fJCnhvrdN1X',
'balances': 1798,
'metadata': 1800,
'totalSupply': '896083333000',
'administrator': 'KT1TwzD6zV3WeJ39ukuqxcfK2fJCnhvrdN1X',
'token_metadata': 1801,
'frozen_accounts': 1799,
},
block=None,
sender_alias='PLENTY / SMAK Swap',
nonce=1,
target_alias='Smartlink',
initiator_alias=None,
entrypoint='transfer',
parameter_json={'to': 'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU', 'from': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4', 'value': '16000000'},
originated_contract_address=None,
originated_contract_alias=None,
originated_contract_type_hash=None,
originated_contract_code_hash=None,
diffs=(
{
'bigmap': 1798,
'path': 'balances',
'action': 'update_key',
'content': {
'hash': 'exprtkqafR3YBedPSHP6Lts8WVHn4jj853RfRZiTpzYNP8KKLaU12H',
'key': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
'value': {'balance': '208684', 'approvals': {'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU': '0'}},
},
},
{
'bigmap': 1798,
'path': 'balances',
'action': 'add_key',
'content': {
'hash': 'exprugvwjodjwqmGVVryY5uqz9fcg6BndukYj6bproCFShQ6nkuG8e',
'key': 'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
'value': {'balance': '16000000', 'approvals': {}},
},
},
),
),
OperationData(
type='transaction',
id=76905133,
level=1676582,
timestamp=datetime.datetime(2021, 9, 8, 16, 2, 14, tzinfo=datetime.timezone.utc),
hash='opWVrmpgeuQ2tz65DcV5USnCFW7j7x97XQ2BzEAcmefPEjUfkMw',
counter=15811432,
sender_address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
target_address='KT1NLZah1MKeWuveQvdsCqAUCjksKw8J296z',
initiator_address='tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
amount=None,
status='applied',
has_internals=False,
storage={
'balances': 14107,
'metadata': 14108,
'totalSupply': '86717933554615',
'administrator': 'tz1ZnK6zYJrC9PfKCPryg9tPW6LrERisTGtg',
'securityCheck': True,
'token_metadata': 14109,
'exchangeAddress': 'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
},
block=None,
sender_alias='PLENTY / SMAK Swap',
nonce=2,
target_alias='PLENTY / SMAK LP Token',
initiator_alias=None,
entrypoint='mint',
parameter_json={'value': '86717933554615', 'address': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4'},
originated_contract_address=None,
originated_contract_alias=None,
originated_contract_type_hash=None,
originated_contract_code_hash=None,
diffs=(
{
'bigmap': 14107,
'path': 'balances',
'action': 'add_key',
'content': {
'hash': 'exprtkqafR3YBedPSHP6Lts8WVHn4jj853RfRZiTpzYNP8KKLaU12H',
'key': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
'value': {'balance': '86717933554615', 'approvals': {}},
},
},
),
),
)
index_config = OperationIndexConfig(
datasource=TzktDatasourceConfig(kind='tzkt', url='https://api.tzkt.io', http=None),
kind='operation',
handlers=(
OperationHandlerConfig(
callback='on_fa12_and_fa12_add_liquidity',
pattern=(
OperationHandlerTransactionPatternConfig(
type='transaction',
source=None,
destination=ContractConfig(address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU', typename='plenty_smak_amm'),
entrypoint='AddLiquidity',
optional=False,
),
OperationHandlerTransactionPatternConfig(
type='transaction',
source=None,
destination=ContractConfig(address='KT1GRSvLoikDsXujKgZPsGLX8k8VvR2Tq95b', typename='plenty_token'),
entrypoint='transfer',
optional=False,
),
OperationHandlerTransactionPatternConfig(
type='transaction',
source=None,
destination=ContractConfig(address='KT1TwzD6zV3WeJ39ukuqxcfK2fJCnhvrdN1X', typename='smak_token'),
entrypoint='transfer',
optional=False,
),
OperationHandlerTransactionPatternConfig(
type='transaction',
source=None,
destination=ContractConfig(address='KT1NLZah1MKeWuveQvdsCqAUCjksKw8J296z', typename='plenty_smak_lp'),
entrypoint='mint',
optional=False,
),
),
),
),
types=(OperationType.transaction, OperationType.origination),
contracts=[ContractConfig(address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU', typename='plenty_smak_amm')],
first_level=0,
last_level=0,
)
index_config.name = 'asdf'
class MatcherTest(IsolatedAsyncioTestCase):
async def test_match_smak_add_liquidity(self) -> None:
index = OperationIndex(None, index_config, None) # type: ignore
index._prepare_handler_args = AsyncMock() # type: ignore
all_filtered = tuple(extract_operation_subgroups(add_liquidity_operations, set(), set()))
assert not all_filtered
operation_subgroups = tuple(
extract_operation_subgroups(
add_liquidity_operations,
addresses=index_config.address_filter,
entrypoints=index_config.entrypoint_filter,
)
)
assert len(operation_subgroups) == 1
matched_handlers = await index._match_operation_subgroup(operation_subgroups[0])
assert len(matched_handlers) == 1
index._prepare_handler_args.assert_called()
| 40.480769 | 139 | 0.584086 | import datetime
from unittest import IsolatedAsyncioTestCase
from unittest.mock import AsyncMock
from dipdup.config import ContractConfig
from dipdup.config import OperationHandlerConfig
from dipdup.config import OperationHandlerTransactionPatternConfig
from dipdup.config import OperationIndexConfig
from dipdup.config import OperationType
from dipdup.config import TzktDatasourceConfig
from dipdup.index import OperationIndex
from dipdup.index import extract_operation_subgroups
from dipdup.models import OperationData
add_liquidity_operations = (
OperationData(
type='transaction',
id=76905130,
level=1676582,
timestamp=datetime.datetime(2021, 9, 8, 16, 2, 14, tzinfo=datetime.timezone.utc),
hash='opWVrmpgeuQ2tz65DcV5USnCFW7j7x97XQ2BzEAcmefPEjUfkMw',
counter=15811432,
sender_address='tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
target_address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
initiator_address=None,
amount=None,
status='applied',
has_internals=True,
storage={
'admin': 'KT1Kfu13FmNbcZSjTPZLrAUbEYNZim6vtg6d',
'lpFee': '400',
'paused': False,
'token1Id': '0',
'token2Id': '0',
'systemFee': '1000',
'token1_Fee': '0',
'token2_Fee': '0',
'token1Check': False,
'token1_pool': '470000000000000000000',
'token2Check': False,
'token2_pool': '16000000',
'totalSupply': '86717933554715',
'maxSwapLimit': '40',
'token1Address': 'KT1GRSvLoikDsXujKgZPsGLX8k8VvR2Tq95b',
'token2Address': 'KT1TwzD6zV3WeJ39ukuqxcfK2fJCnhvrdN1X',
'lpTokenAddress': 'KT1NLZah1MKeWuveQvdsCqAUCjksKw8J296z',
},
block=None,
sender_alias=None,
nonce=None,
target_alias='PLENTY / SMAK Swap',
initiator_alias=None,
entrypoint='AddLiquidity',
parameter_json={
'recipient': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
'token1_max': '470000000000000000000',
'token2_max': '16000000',
},
originated_contract_address=None,
originated_contract_alias=None,
originated_contract_type_hash=None,
originated_contract_code_hash=None,
),
OperationData(
type='transaction',
id=76905131,
level=1676582,
timestamp=datetime.datetime(2021, 9, 8, 16, 2, 14, tzinfo=datetime.timezone.utc),
hash='opWVrmpgeuQ2tz65DcV5USnCFW7j7x97XQ2BzEAcmefPEjUfkMw',
counter=15811432,
sender_address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
target_address='KT1GRSvLoikDsXujKgZPsGLX8k8VvR2Tq95b',
initiator_address='tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
amount=None,
status='applied',
has_internals=False,
storage={
'paused': False,
'balances': 3943,
'metadata': 3944,
'lastUpdate': '1676287',
'totalSupply': '14712639179877222051752285',
'administrator': 'KT1GpTEq4p2XZ8w9p5xM7Wayyw5VR7tb3UaW',
'token_metadata': 3945,
'tokensPerBlock': '50000000000000000000',
},
block=None,
sender_alias='PLENTY / SMAK Swap',
nonce=0,
target_alias='PLENTY',
initiator_alias=None,
entrypoint='transfer',
parameter_json={
'to': 'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
'from': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
'value': '470000000000000000000',
},
originated_contract_address=None,
originated_contract_alias=None,
originated_contract_type_hash=None,
originated_contract_code_hash=None,
diffs=(
{
'bigmap': 3943,
'path': 'balances',
'action': 'update_key',
'content': {
'hash': 'exprtkqafR3YBedPSHP6Lts8WVHn4jj853RfRZiTpzYNP8KKLaU12H',
'key': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
'value': {
'balance': '1141847967508578897233841',
'approvals': {
'KT19Dskaofi6ZTkrw3Tq4pK7fUqHqCz4pTZ3': '0',
'KT1AbuUaPQmYLsB8n8FdSzBrxvrsm8ctwW1V': '0',
'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU': '0',
'KT1HUnqM6xFJa51PM2xHfLs7s6ARvXungtyq': '0',
'KT1HZkD2T4uczgYkZ6fb9gm1fymeJoRuezLz': '9060000000000000000000',
'KT1NtsnKQ1c3rYB12ZToP77XaJs8WDBvF221': '0',
'KT1PuPNtDFLR6U7e7vDuxunDoKasVT6kMSkz': '0',
'KT1UNBvCJXiwJY6tmHM7CJUVwNPew53XkSfh': '0',
'KT1VeNQa4mucRj36qAJ9rTzm4DTJKfemVaZT': '0',
'KT1X1LgNkQShpF9nRLYw3Dgdy4qp38MX617z': '0',
'KT1XVrXmWY9AdVri6KpxKo4CWxizKajmgzMt': '0',
'KT1XXAavg3tTj12W1ADvd3EEnm1pu6XTmiEF': '550000000000000000',
'KT1XutoFJ9dXvWxT7ttG86N2tSTUEpatFVTm': '0',
},
},
},
},
{
'bigmap': 3943,
'path': 'balances',
'action': 'add_key',
'content': {
'hash': 'exprugvwjodjwqmGVVryY5uqz9fcg6BndukYj6bproCFShQ6nkuG8e',
'key': 'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
'value': {'balance': '470000000000000000000', 'approvals': {}},
},
},
),
),
OperationData(
type='transaction',
id=76905132,
level=1676582,
timestamp=datetime.datetime(2021, 9, 8, 16, 2, 14, tzinfo=datetime.timezone.utc),
hash='opWVrmpgeuQ2tz65DcV5USnCFW7j7x97XQ2BzEAcmefPEjUfkMw',
counter=15811432,
sender_address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
target_address='KT1TwzD6zV3WeJ39ukuqxcfK2fJCnhvrdN1X',
initiator_address='tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
amount=None,
status='applied',
has_internals=False,
storage={
'freezer': 'KT1TwzD6zV3WeJ39ukuqxcfK2fJCnhvrdN1X',
'balances': 1798,
'metadata': 1800,
'totalSupply': '896083333000',
'administrator': 'KT1TwzD6zV3WeJ39ukuqxcfK2fJCnhvrdN1X',
'token_metadata': 1801,
'frozen_accounts': 1799,
},
block=None,
sender_alias='PLENTY / SMAK Swap',
nonce=1,
target_alias='Smartlink',
initiator_alias=None,
entrypoint='transfer',
parameter_json={'to': 'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU', 'from': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4', 'value': '16000000'},
originated_contract_address=None,
originated_contract_alias=None,
originated_contract_type_hash=None,
originated_contract_code_hash=None,
diffs=(
{
'bigmap': 1798,
'path': 'balances',
'action': 'update_key',
'content': {
'hash': 'exprtkqafR3YBedPSHP6Lts8WVHn4jj853RfRZiTpzYNP8KKLaU12H',
'key': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
'value': {'balance': '208684', 'approvals': {'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU': '0'}},
},
},
{
'bigmap': 1798,
'path': 'balances',
'action': 'add_key',
'content': {
'hash': 'exprugvwjodjwqmGVVryY5uqz9fcg6BndukYj6bproCFShQ6nkuG8e',
'key': 'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
'value': {'balance': '16000000', 'approvals': {}},
},
},
),
),
OperationData(
type='transaction',
id=76905133,
level=1676582,
timestamp=datetime.datetime(2021, 9, 8, 16, 2, 14, tzinfo=datetime.timezone.utc),
hash='opWVrmpgeuQ2tz65DcV5USnCFW7j7x97XQ2BzEAcmefPEjUfkMw',
counter=15811432,
sender_address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
target_address='KT1NLZah1MKeWuveQvdsCqAUCjksKw8J296z',
initiator_address='tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
amount=None,
status='applied',
has_internals=False,
storage={
'balances': 14107,
'metadata': 14108,
'totalSupply': '86717933554615',
'administrator': 'tz1ZnK6zYJrC9PfKCPryg9tPW6LrERisTGtg',
'securityCheck': True,
'token_metadata': 14109,
'exchangeAddress': 'KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU',
},
block=None,
sender_alias='PLENTY / SMAK Swap',
nonce=2,
target_alias='PLENTY / SMAK LP Token',
initiator_alias=None,
entrypoint='mint',
parameter_json={'value': '86717933554615', 'address': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4'},
originated_contract_address=None,
originated_contract_alias=None,
originated_contract_type_hash=None,
originated_contract_code_hash=None,
diffs=(
{
'bigmap': 14107,
'path': 'balances',
'action': 'add_key',
'content': {
'hash': 'exprtkqafR3YBedPSHP6Lts8WVHn4jj853RfRZiTpzYNP8KKLaU12H',
'key': 'tz1cmAfyjWW3Rf3tH3M3maCpwsiAwBKbtmG4',
'value': {'balance': '86717933554615', 'approvals': {}},
},
},
),
),
)
index_config = OperationIndexConfig(
datasource=TzktDatasourceConfig(kind='tzkt', url='https://api.tzkt.io', http=None),
kind='operation',
handlers=(
OperationHandlerConfig(
callback='on_fa12_and_fa12_add_liquidity',
pattern=(
OperationHandlerTransactionPatternConfig(
type='transaction',
source=None,
destination=ContractConfig(address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU', typename='plenty_smak_amm'),
entrypoint='AddLiquidity',
optional=False,
),
OperationHandlerTransactionPatternConfig(
type='transaction',
source=None,
destination=ContractConfig(address='KT1GRSvLoikDsXujKgZPsGLX8k8VvR2Tq95b', typename='plenty_token'),
entrypoint='transfer',
optional=False,
),
OperationHandlerTransactionPatternConfig(
type='transaction',
source=None,
destination=ContractConfig(address='KT1TwzD6zV3WeJ39ukuqxcfK2fJCnhvrdN1X', typename='smak_token'),
entrypoint='transfer',
optional=False,
),
OperationHandlerTransactionPatternConfig(
type='transaction',
source=None,
destination=ContractConfig(address='KT1NLZah1MKeWuveQvdsCqAUCjksKw8J296z', typename='plenty_smak_lp'),
entrypoint='mint',
optional=False,
),
),
),
),
types=(OperationType.transaction, OperationType.origination),
contracts=[ContractConfig(address='KT1BEC9uHmADgVLXCm3wxN52qJJ85ohrWEaU', typename='plenty_smak_amm')],
first_level=0,
last_level=0,
)
index_config.name = 'asdf'
class MatcherTest(IsolatedAsyncioTestCase):
async def test_match_smak_add_liquidity(self) -> None:
index = OperationIndex(None, index_config, None)
index._prepare_handler_args = AsyncMock()
all_filtered = tuple(extract_operation_subgroups(add_liquidity_operations, set(), set()))
assert not all_filtered
operation_subgroups = tuple(
extract_operation_subgroups(
add_liquidity_operations,
addresses=index_config.address_filter,
entrypoints=index_config.entrypoint_filter,
)
)
assert len(operation_subgroups) == 1
matched_handlers = await index._match_operation_subgroup(operation_subgroups[0])
assert len(matched_handlers) == 1
index._prepare_handler_args.assert_called()
| true | true |
f73b232e3b8f1bb1960d37d81ff29c6e8ec2c696 | 2,551 | py | Python | chebpy/core/exceptions.py | janniklasrose/chebpy | f69ca920e7c672e4a37e53a7d32f580c536f3462 | [
"BSD-3-Clause"
] | 90 | 2016-09-02T23:00:13.000Z | 2022-02-08T15:20:21.000Z | chebpy/core/exceptions.py | janniklasrose/chebpy | f69ca920e7c672e4a37e53a7d32f580c536f3462 | [
"BSD-3-Clause"
] | 55 | 2016-11-16T06:18:51.000Z | 2022-03-25T14:25:17.000Z | chebpy/core/exceptions.py | janniklasrose/chebpy | f69ca920e7c672e4a37e53a7d32f580c536f3462 | [
"BSD-3-Clause"
] | 22 | 2016-08-30T01:28:22.000Z | 2022-01-10T11:13:06.000Z | from abc import ABC, abstractmethod
class ChebpyBaseException(Exception, ABC):
def __init__(self, *args):
if args:
self.message = args[0]
else:
self.message = self.default_message
def __str__(self):
return self.message
@property
@abstractmethod
def default_message(self):
raise NotImplementedError
# ===============================================
# chebpy.core.utilities.Interval exceptions
# ===============================================
IntervalOverlap = type(
"IntervalOverlap",
(ChebpyBaseException,),
{"default_message": "The supplied Interval objects overlap"},
)
IntervalGap = type(
"IntervalGap",
(ChebpyBaseException,),
{
"default_message": "The supplied Interval objects do not form a complete "
"partition of the approximation interval"
},
)
IntervalMismatch = type(
"IntervalMismatch",
(ChebpyBaseException,),
{
"default_message": "This operation can only be performed for Fun objects "
"defined on identical intervals"
},
)
NotSubinterval = type(
"NotSubinterval",
(ChebpyBaseException,),
{"default_message": "Not a subinterval"},
)
IntervalValues = type(
"IntervalValues",
(ChebpyBaseException,),
{
"default_message": "The defining values of a Interval object must be "
"strictly increasing"
},
)
# ===============================================
# chebpy.core.utilities.Domain exceptions
# ===============================================
InvalidDomain = type(
"InvalidDomain",
(ChebpyBaseException,),
{
"default_message": "Domain objects must be initialised from an iterable "
"collection of at least two monotonically increasing "
"scalars"
},
)
NotSubdomain = type(
"NotSubdomain",
(ChebpyBaseException,),
{
"default_message": "The support of the target Domain object is required "
"to define a subinterval of the support of the "
"original"
},
)
SupportMismatch = type(
"SupportMismatch",
(ChebpyBaseException,),
{
"default_message": "Both objects are required to be supported on the "
"same interval"
},
)
BadFunLengthArgument = type(
"BadFunLengthArgument",
(ChebpyBaseException,),
{
"default_message": "The 'n' argument must be either a single numeric "
"value, or iterable thereof posessing one fewer "
"elements than the size of the domain"
},
)
| 22.377193 | 82 | 0.593101 | from abc import ABC, abstractmethod
class ChebpyBaseException(Exception, ABC):
def __init__(self, *args):
if args:
self.message = args[0]
else:
self.message = self.default_message
def __str__(self):
return self.message
@property
@abstractmethod
def default_message(self):
raise NotImplementedError
IntervalOverlap = type(
"IntervalOverlap",
(ChebpyBaseException,),
{"default_message": "The supplied Interval objects overlap"},
)
IntervalGap = type(
"IntervalGap",
(ChebpyBaseException,),
{
"default_message": "The supplied Interval objects do not form a complete "
"partition of the approximation interval"
},
)
IntervalMismatch = type(
"IntervalMismatch",
(ChebpyBaseException,),
{
"default_message": "This operation can only be performed for Fun objects "
"defined on identical intervals"
},
)
NotSubinterval = type(
"NotSubinterval",
(ChebpyBaseException,),
{"default_message": "Not a subinterval"},
)
IntervalValues = type(
"IntervalValues",
(ChebpyBaseException,),
{
"default_message": "The defining values of a Interval object must be "
"strictly increasing"
},
)
InvalidDomain = type(
"InvalidDomain",
(ChebpyBaseException,),
{
"default_message": "Domain objects must be initialised from an iterable "
"collection of at least two monotonically increasing "
"scalars"
},
)
NotSubdomain = type(
"NotSubdomain",
(ChebpyBaseException,),
{
"default_message": "The support of the target Domain object is required "
"to define a subinterval of the support of the "
"original"
},
)
SupportMismatch = type(
"SupportMismatch",
(ChebpyBaseException,),
{
"default_message": "Both objects are required to be supported on the "
"same interval"
},
)
BadFunLengthArgument = type(
"BadFunLengthArgument",
(ChebpyBaseException,),
{
"default_message": "The 'n' argument must be either a single numeric "
"value, or iterable thereof posessing one fewer "
"elements than the size of the domain"
},
)
| true | true |
f73b2407866493cd9a430a65f59ed27e4347d3a9 | 3,252 | py | Python | ScheduledDeliveryWebApplication/app/models/address_model.py | leitao-bcc/MovileNext3_Backend_LucasLeitao | 15bdd8a96711a2e305078cd2f152b86374dbe276 | [
"Unlicense"
] | null | null | null | ScheduledDeliveryWebApplication/app/models/address_model.py | leitao-bcc/MovileNext3_Backend_LucasLeitao | 15bdd8a96711a2e305078cd2f152b86374dbe276 | [
"Unlicense"
] | null | null | null | ScheduledDeliveryWebApplication/app/models/address_model.py | leitao-bcc/MovileNext3_Backend_LucasLeitao | 15bdd8a96711a2e305078cd2f152b86374dbe276 | [
"Unlicense"
] | null | null | null | from app.models.base_model import BaseModel, db
from app.validators.cep_validator import is_valid_cep
from app.validators.coordinates_validator import is_valid_latitude, \
is_valid_longitude
from app.validators.none_or_empty_validator import is_none_or_empty
from app.validators.string_format_validator import is_float
class AddressModel(db.Model, BaseModel):
__tablename__ = 'addresses'
id = db.Column(db.Integer, primary_key=True)
country = db.Column(db.String(45), nullable=False)
state = db.Column(db.String(45), nullable=False)
city = db.Column(db.String(45), nullable=False)
neighborhood = db.Column(db.String(45), nullable=False)
street_name = db.Column(db.String(90), nullable=False)
street_number = db.Column(db.String(10))
postal_code = db.Column(db.String(10), nullable=False)
complement = db.Column(db.String(45))
latitude = db.Column(db.Float(9), nullable=False)
longitude = db.Column(db.Float(9), nullable=False)
def __init__(self, country, state, city, neighborhood, street_name,
street_number, postal_code, complement,
latitude, longitude):
if is_none_or_empty(country):
raise ValueError("Address Country {}".format(country))
if is_none_or_empty(state):
raise ValueError("Address State {}".format(state))
if is_none_or_empty(city):
raise ValueError("Address City {}".format(city))
if is_none_or_empty(neighborhood):
raise ValueError("Address Neighborhood {}".format(neighborhood))
if is_none_or_empty(street_name):
raise ValueError("Address StreetName {}".format(street_name))
postal_code = str(postal_code)
if not is_valid_cep(postal_code):
raise ValueError("Address PostalCode {}".format(postal_code))
if not is_float(latitude):
raise ValueError("Address Latitude {}".format(latitude))
latitude = float(latitude)
if not is_valid_latitude(latitude):
raise ValueError("Address Latitude {}".format(latitude))
if not is_float(longitude):
raise ValueError("Address Longitude {}".format(longitude))
longitude = float(longitude)
if not is_valid_longitude(longitude):
raise ValueError("Address Longitude {}".format(longitude))
self.country = country
self.state = state
self.city = city
self.neighborhood = neighborhood
self.street_name = street_name
self.street_number = street_number
self.postal_code = postal_code
self.complement = complement
self.latitude = latitude
self.longitude = longitude
def __repr__(self):
return "<AddressModel %r>" % self.street_name
def to_json(self):
return {
"country": self.country,
"state": self.state,
"city": self.city,
"neighborhood": self.neighborhood,
"streetName": self.street_name,
"streetNumber": self.street_number,
"postalCode": self.postal_code,
"complement": self.complement,
"latitude": self.latitude,
"longitude": self.longitude
}
| 36.954545 | 76 | 0.654982 | from app.models.base_model import BaseModel, db
from app.validators.cep_validator import is_valid_cep
from app.validators.coordinates_validator import is_valid_latitude, \
is_valid_longitude
from app.validators.none_or_empty_validator import is_none_or_empty
from app.validators.string_format_validator import is_float
class AddressModel(db.Model, BaseModel):
    """Database model (Flask-SQLAlchemy style) for a postal address with coordinates.

    All constructor arguments are validated; a ValueError naming the offending
    field is raised on bad input.
    """

    __tablename__ = 'addresses'

    id = db.Column(db.Integer, primary_key=True)
    country = db.Column(db.String(45), nullable=False)
    state = db.Column(db.String(45), nullable=False)
    city = db.Column(db.String(45), nullable=False)
    neighborhood = db.Column(db.String(45), nullable=False)
    street_name = db.Column(db.String(90), nullable=False)
    street_number = db.Column(db.String(10))
    postal_code = db.Column(db.String(10), nullable=False)
    complement = db.Column(db.String(45))
    latitude = db.Column(db.Float(9), nullable=False)
    longitude = db.Column(db.Float(9), nullable=False)

    def __init__(self, country, state, city, neighborhood, street_name,
                 street_number, postal_code, complement,
                 latitude, longitude):
        # Mandatory free-text fields, checked in declaration order so the
        # raised message always names the first offending field.
        for label, value in (("Country", country),
                             ("State", state),
                             ("City", city),
                             ("Neighborhood", neighborhood),
                             ("StreetName", street_name)):
            if is_none_or_empty(value):
                raise ValueError("Address {} {}".format(label, value))

        # Postal code is normalized to a string before CEP validation.
        postal_code = str(postal_code)
        if not is_valid_cep(postal_code):
            raise ValueError("Address PostalCode {}".format(postal_code))

        # Coordinates: must parse as float, then be inside the valid range.
        if not is_float(latitude):
            raise ValueError("Address Latitude {}".format(latitude))
        latitude = float(latitude)
        if not is_valid_latitude(latitude):
            raise ValueError("Address Latitude {}".format(latitude))

        if not is_float(longitude):
            raise ValueError("Address Longitude {}".format(longitude))
        longitude = float(longitude)
        if not is_valid_longitude(longitude):
            raise ValueError("Address Longitude {}".format(longitude))

        self.country = country
        self.state = state
        self.city = city
        self.neighborhood = neighborhood
        self.street_name = street_name
        self.street_number = street_number
        self.postal_code = postal_code
        self.complement = complement
        self.latitude = latitude
        self.longitude = longitude

    def __repr__(self):
        return "<AddressModel %r>" % self.street_name

    def to_json(self):
        """Serialize the address with camelCase keys for the API layer."""
        return dict(
            country=self.country,
            state=self.state,
            city=self.city,
            neighborhood=self.neighborhood,
            streetName=self.street_name,
            streetNumber=self.street_number,
            postalCode=self.postal_code,
            complement=self.complement,
            latitude=self.latitude,
            longitude=self.longitude,
        )
| true | true |
f73b24f5f48434cd6014034a47134e9c3fe120b7 | 3,781 | py | Python | contrib/macdeploy/custom_dsstore.py | byeonggoon/blockteam1-1 | 59e7890bb3d6c1091355c8e91898fdb7199a3328 | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | byeonggoon/blockteam1-1 | 59e7890bb3d6c1091355c8e91898fdb7199a3328 | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | byeonggoon/blockteam1-1 | 59e7890bb3d6c1091355c8e91898fdb7199a3328 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Writes a custom .DS_Store so Finder presents the .dmg with a fixed window,
# background image and icon layout.
# Usage: custom_dsstore.py <output .DS_Store path> <volume/package name>
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys

# Command-line arguments: target file and the disk-image volume name.
output_file = sys.argv[1]
package_name_ns = sys.argv[2]

ds = DSStore.open(output_file, 'w+')

# Finder window settings: bare window, no sidebar/toolbar/status bar.
ds['.']['bwsp'] = {
    'ShowStatusBar': False,
    'WindowBounds': b'{{300, 280}, {500, 343}}',
    'ContainerShowSidebar': False,
    'SidebarWidth': 0,
    'ShowTabView': False,
    'PreviewPaneVisibility': False,
    'ShowToolbar': False,
    'ShowSidebar': False,
    'ShowPathbar': True
}

# Icon-view options. 'backgroundImageAlias' is a canned binary Alias record
# pointing at /.background/background.tiff on the template volume; the
# volume/dmg fields inside it are re-targeted below.
icvp = {
    'gridOffsetX': 0.0,
    'textSize': 12.0,
    'viewOptionsVersion': 1,
    'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
    'backgroundColorBlue': 1.0,
    'iconSize': 96.0,
    'backgroundColorGreen': 1.0,
    'arrangeBy': 'none',
    'showIconPreview': True,
    'gridSpacing': 100.0,
    'gridOffsetY': 0.0,
    'showItemInfo': False,
    'labelOnBottom': True,
    'backgroundType': 2,
    'backgroundColorRed': 1.0
}

# Patch the alias so it refers to this package's volume and temporary dmg
# instead of the template paths baked into the blob above.
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00hashrootuser:\x00Documents:\x00hashroot:\x00hashroot:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/hashrootuser/Documents/hashroot/hashroot/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp

ds['.']['vSrn'] = ('long', 1)

# Fixed icon positions inside the Finder window.
ds['Applications']['Iloc'] = (370, 156)
ds['Hashroot-Qt.app']['Iloc'] = (128, 156)

ds.flush()
ds.close()
| 61.983607 | 1,817 | 0.72785 |
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00hashrootuser:\x00Documents:\x00hashroot:\x00hashroot:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/hashrootuser/Documents/hashroot/hashroot/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['Hashroot-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| true | true |
f73b27fbb4c48b340935f0983840b3a389f98432 | 1,619 | py | Python | src/uefi/BaseTools/Source/Python/Ecc/FileProfile.py | kkennett/oscore.dev | 59e786f12f9af969211c95a9d2863b1767528341 | [
"BSD-3-Clause"
] | null | null | null | src/uefi/BaseTools/Source/Python/Ecc/FileProfile.py | kkennett/oscore.dev | 59e786f12f9af969211c95a9d2863b1767528341 | [
"BSD-3-Clause"
] | null | null | null | src/uefi/BaseTools/Source/Python/Ecc/FileProfile.py | kkennett/oscore.dev | 59e786f12f9af969211c95a9d2863b1767528341 | [
"BSD-3-Clause"
] | null | null | null | ## @file
# fragments of source file
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
import re
import Common.LongFilePathOs as os
from Ecc.ParserWarning import Warning
from Common.LongFilePathSupport import OpenLongFilePath as open
# Module-level collections, one per category of C construct; presumably
# populated as each source file is parsed (populating code is outside
# this view — verify against callers).
CommentList = []
PPDirectiveList = []
PredicateExpressionList = []
FunctionDefinitionList = []
VariableDeclarationList = []
EnumerationDefinitionList = []
StructUnionDefinitionList = []
TypedefDefinitionList = []
FunctionCallingList = []
## record file data when parsing source
#
# May raise Exception when opening file.
#
class FileProfile:
    """Records the raw contents of a source file for parsing.

    FileLinesListFromFile holds the lines exactly as read from disk (bytes,
    since the file is opened in binary mode); FileLinesList starts empty and
    is filled in elsewhere by the parser.

    Raises Warning (Ecc.ParserWarning.Warning, imported above as ``Warning``)
    when the file cannot be opened.
    """

    def __init__(self, FileName):
        self.FileLinesList = []
        self.FileLinesListFromFile = []
        try:
            # Unbuffered binary read; the 'with' statement guarantees the
            # handle is closed even if readlines() raises (replaces the
            # original try/finally + explicit close()).
            with open(FileName, "rb", 0) as fsock:
                self.FileLinesListFromFile = fsock.readlines()
        except IOError:
            raise Warning("Error when opening file %s" % FileName)
| 27.913793 | 85 | 0.692403 |
from __future__ import absolute_import
import re
import Common.LongFilePathOs as os
from Ecc.ParserWarning import Warning
from Common.LongFilePathSupport import OpenLongFilePath as open
CommentList = []
PPDirectiveList = []
PredicateExpressionList = []
FunctionDefinitionList = []
VariableDeclarationList = []
EnumerationDefinitionList = []
StructUnionDefinitionList = []
TypedefDefinitionList = []
FunctionCallingList = []
ef __init__(self, FileName):
self.FileLinesList = []
self.FileLinesListFromFile = []
try:
fsock = open(FileName, "rb", 0)
try:
self.FileLinesListFromFile = fsock.readlines()
finally:
fsock.close()
except IOError:
raise Warning("Error when opening file %s" % FileName)
| true | true |
f73b2857b7b40bf899f8c58f6e44ffa98b4400cf | 1,755 | py | Python | burger_war_dev/scripts/transform_enemy_pose.py | Gantetsu-robocon/burger_war_dev | e74e966dfc1d7a27c372cd794364625311ea5ec2 | [
"BSD-3-Clause"
] | null | null | null | burger_war_dev/scripts/transform_enemy_pose.py | Gantetsu-robocon/burger_war_dev | e74e966dfc1d7a27c372cd794364625311ea5ec2 | [
"BSD-3-Clause"
] | null | null | null | burger_war_dev/scripts/transform_enemy_pose.py | Gantetsu-robocon/burger_war_dev | e74e966dfc1d7a27c372cd794364625311ea5ec2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#TF enemy position from ralative_pos topic
#Add time losed enemy to color_flag
import rospy
import tf2_ros
import tf_conversions
import tf
import math
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import TransformStamped
from geometry_msgs.msg import Quaternion
class TransformEnemy():
    """Re-broadcasts the enemy pose (topic /relative_pose) as TF frame 'enemy_pos'."""

    def __init__(self):
        # Broadcast frequency [Hz].
        self.rate = rospy.get_param("~rate", 1)
        # TF broadcaster plus the subscription feeding it.
        self.br = tf2_ros.TransformBroadcaster()
        self.enemy_sub = rospy.Subscriber('/relative_pose', PoseStamped, self.enemyCallback)
        # Until the first message arrives, assume the enemy 2.6 m ahead,
        # rotated half a turn (yaw = pi) to face us.
        self.enemy_ps = PoseStamped()
        initial_yaw = tf.transformations.quaternion_from_euler(0.0, 0.0, math.pi)
        self.enemy_ps.pose.position.x = 2.6
        self.enemy_ps.pose.orientation = Quaternion(*initial_yaw)

    def enemyCallback(self, data):
        # Cache the most recent relative pose for the broadcast loop.
        self.enemy_ps.pose.position = data.pose.position
        self.enemy_ps.pose.orientation = data.pose.orientation

    def tf_enemy_pose(self):
        # Publish base_footprint -> enemy_pos from the cached pose.
        stamped = TransformStamped()
        stamped.header.stamp = rospy.Time.now()
        stamped.header.frame_id = 'base_footprint'
        stamped.child_frame_id = 'enemy_pos'
        stamped.transform.translation = self.enemy_ps.pose.position
        stamped.transform.rotation = self.enemy_ps.pose.orientation
        self.br.sendTransform(stamped)

    def main(self):
        # Broadcast at the configured rate until ROS shuts down.
        loop = rospy.Rate(self.rate)
        while not rospy.is_shutdown():
            self.tf_enemy_pose()
            loop.sleep()
if __name__ == '__main__':
    # Node entry point: start the node and broadcast until shutdown.
    rospy.init_node('enemy_tf_broadcaster')
    br_enemy = TransformEnemy()
    br_enemy.main()
| 27.421875 | 92 | 0.65812 |
import rospy
import tf2_ros
import tf_conversions
import tf
import math
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import TransformStamped
from geometry_msgs.msg import Quaternion
class TransformEnemy():
def __init__(self):
self.rate = rospy.get_param("~rate", 1)
self.br = tf2_ros.TransformBroadcaster()
self.enemy_sub = rospy.Subscriber('/relative_pose', PoseStamped, self.enemyCallback)
self.enemy_ps = PoseStamped()
q = tf.transformations.quaternion_from_euler(0.0, 0.0, math.pi)
rotation = Quaternion(*q)
self.enemy_ps.pose.position.x = 2.6
self.enemy_ps.pose.orientation = rotation
def enemyCallback(self,data):
self.enemy_ps.pose.position = data.pose.position
self.enemy_ps.pose.orientation = data.pose.orientation
def tf_enemy_pose(self):
t = TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = 'base_footprint'
t.child_frame_id = 'enemy_pos'
t.transform.translation = self.enemy_ps.pose.position
t.transform.rotation = self.enemy_ps.pose.orientation
self.br.sendTransform(t)
def main(self):
rate = rospy.Rate(self.rate)
while not rospy.is_shutdown():
self.tf_enemy_pose()
rate.sleep()
if __name__ == '__main__':
rospy.init_node('enemy_tf_broadcaster')
br_enemy = TransformEnemy()
br_enemy.main()
| true | true |
f73b289ccdf3c5960f72cc446fc16b93de1fbde3 | 72,219 | py | Python | Tools/ProxyStubGenerator/CppParser.py | mallikarjuna54/Thunder | 9c91ff5fab29cd29c136daa3a5bfa6da068e328a | [
"Apache-2.0"
] | null | null | null | Tools/ProxyStubGenerator/CppParser.py | mallikarjuna54/Thunder | 9c91ff5fab29cd29c136daa3a5bfa6da068e328a | [
"Apache-2.0"
] | null | null | null | Tools/ProxyStubGenerator/CppParser.py | mallikarjuna54/Thunder | 9c91ff5fab29cd29c136daa3a5bfa6da068e328a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# If not stated otherwise in this file or this component's license file the
# following copyright and licenses apply:
#
# Copyright 2020 Metrological
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# C++ header parser
#
import re, uuid, sys, copy, hashlib, os
from collections import OrderedDict
from enum import IntEnum
class ParserError(RuntimeError):
    """Parse failure; the message is prefixed with the current file and line."""

    def __init__(self, msg):
        location = "%s(%s)" % (CurrentFile(), CurrentLine())
        super(ParserError, self).__init__("%s: parse error: %s" % (location, msg))
class LoaderError(RuntimeError):
    """Failure to load an input file; the message names the file."""

    def __init__(self, file, msg):
        super(LoaderError, self).__init__("%s: load error: %s" % (file, msg))
# Checks if identifier is valid.
def is_valid(token):
if "operator" in token:
return re.match(r'^[-\+~<>=!%&^*/\|\[\]]+$', token[8:])
else:
validChars = re.match(r'^[a-zA-Z0-9_~]+$', token)
return token and validChars and not token[0].isdigit()
def ASSERT_ISVALID(token):
    """Raise ParserError if token is not a usable identifier."""
    if not is_valid(token):
        raise ParserError("invalid identifier: '" + token + "'")
    if token in ["alignas", "alignof"]:
        raise ParserError("alignment specifiers are not supported")
def ASSERT_ISEXPECTED(token, list):
    """Raise ParserError unless token is one of the allowed alternatives."""
    if token in list:
        return
    raise ParserError("unexpected identifier: '" + token + "', expected one of " + str(list))
# -------------------------------------------------------------------------
# CLASS DEFINITIONS
# -------------------------------------------------------------------------
# Root of the parse tree; presumably assigned the top-level Namespace by the
# parser entry point (assignment is outside this view — verify).
global_namespace = None
class Ref(IntEnum):
    """Bit flags describing how a Type is decorated (combined with |)."""
    VALUE = 1
    POINTER = 2
    REFERENCE = 4
    RVALUE_REFERENCE = 8
    # The four members below originally carried trailing commas, making their
    # assigned values tuples like (16,); Enum silently unpacks such tuples as
    # constructor arguments, so behavior was the same but the trap is removed.
    CONST = 16
    VOLATILE = 32
    CONST_POINTER = 64
    VOLATILE_POINTER = 128
class Metadata:
    """Annotation tags harvested from comments (@in, @out, @brief, ...)."""

    def __init__(self):
        # Doxygen-style descriptions.
        self.brief, self.details = "", ""
        # Parameter direction flags (@IN / @OUT).
        self.input, self.output = False, False
        # Qualifiers (@PROPERTY / @DEPRECATED).
        self.is_property, self.is_deprecated = False, False
        # Values captured after @LENGTH/@MAXLENGTH/@INTERFACE/@TEXT tags.
        self.length, self.maxlength = None, None
        self.interface, self.text = None, None
        # Per-name documentation for @PARAM and @RETVAL entries.
        self.param = OrderedDict()
        self.retval = OrderedDict()
class BaseType:
    """Common base of all parsed type representations; wraps the raw value."""

    def __init__(self, type):
        self.type = type

    def Proto(self):
        return self.type

    def __str__(self):
        return self.Proto()


class Undefined(BaseType):
    """A type/expression the parser could not resolve; rendered verbatim."""

    def __init__(self, type, comment=""):
        BaseType.__init__(self, type)
        self.comment = comment

    def Proto(self):
        unresolved = self.type
        if not isinstance(unresolved, list):
            return self.comment + str(unresolved)
        if type(unresolved[0]) is str:
            # Re-join the token list, squeezing spacing back into C++ form.
            text = " ".join(unresolved)
            for spaced, tight in ((" < ", "<"), (" :: ", "::"), (" >", ">"),
                                  (" *", "*"), (" &", "&"), (" &&", "&&")):
                text = text.replace(spaced, tight)
            return self.comment + text
        return self.comment + " ".join(str(token) for token in unresolved)

    def __repr__(self):
        return "undefined %s" % self.Proto()


class Fundamental(BaseType):
    """A C++ fundamental (built-in) type."""

    def __init__(self, type):
        BaseType.__init__(self, type)

    def __repr__(self):
        return "fundamental %s" % self.type


class Intrinsic(BaseType):
    """A type the generator treats as intrinsic (e.g. string)."""

    def __init__(self, type):
        BaseType.__init__(self, type)

    def __repr__(self):
        return "intrinsic %s" % self.type


class BuiltinInteger(Intrinsic):
    """Generator-internal integer; 'fixed' marks the fixed-width variant."""

    def __init__(self, fixed_size = False):
        Intrinsic.__init__(self, "builtin_integer")
        self.fixed = fixed_size

    def IsFixed(self):
        return self.fixed


class String(Intrinsic):
    """The string type; std selects std::string over the framework string."""

    def __init__(self, std=False):
        Intrinsic.__init__(self, "std::string" if std else "string")


class Nullptr_t(Fundamental):
    def __init__(self):
        Fundamental.__init__(self, "std::nullptr_t")


class Void(Fundamental):
    def __init__(self):
        Fundamental.__init__(self, "void")


class Bool(Fundamental):
    def __init__(self):
        Fundamental.__init__(self, "bool")


class Integer(Fundamental):
    """A fundamental integer; derives signedness and carrier size from its spelling."""

    def __init__(self, string):
        Fundamental.__init__(self, string)
        # Unsigned when the spelling says so ("unsigned ..." or "uintNN_t").
        self.signed = ("unsigned" not in self.type) and ("uint" not in self.type)
        if self.type == "char":
            self.size = "char"
        elif self.type == "wchar_t":
            self.size = "wchar"
        else:
            # Fixed-width spellings carry their width in the name.
            for digits, carrier in (("8", "char"), ("16", "short"),
                                    ("32", "long"), ("64", "long long")):
                if digits in self.type:
                    self.size = carrier
                    break
            else:
                # e.g. "signed long" -> "long"; plain "signed int" -> "int".
                self.size = " ".join(self.type.split()[1:])

    def IsFixed(self):
        # Everything except a bare "int" is considered width-stable here.
        return self.size != "int"


class Float(Fundamental):
    def __init__(self, string):
        Fundamental.__init__(self, string)
# Holds identifier type
class Identifier():
    """Parses a token list into (type, name) and the attached @-tag metadata.

    'string' is a flat list of tokens (strings and/or already-resolved Type
    objects).  The constructor runs a one-pass state machine over it, then
    normalizes fundamental type spellings and resolves user-defined names
    against the parse tree rooted at global_namespace.
    """

    def __init__(self, parent_block, parent, string, valid_specifiers, tags_allowed=True):
        self.parent = parent_block
        self.meta = Metadata()
        parent.specifiers = []
        self.name = ""
        type = ["?"] # indexing safety
        type_found = False
        nest1 = 0   # () nesting depth
        nest2 = 0   # <> nesting depth
        array = False
        skip = 0    # tokens consumed by multi-token @tags
        self.value = []

        # Reject constructs the generator cannot represent.
        if string.count("*") > 1:
            raise ParserError("pointers to pointers are not supported: '%s'" % (" ".join(string)))
        elif string.count("[") > 1:
            raise ParserError("multi-dimensional arrays are not supported: '%s'" % (" ".join(string)))
        elif "[" in string and "*" in string:
            raise ParserError("arrays of pointers are not supported: '%s'" % (" ".join(string)))
        elif "&&" in string:
            raise ParserError("rvalue references are not supported: '%s'" % (" ".join(string)))

        for i, token in enumerate(string):
            if not token:
                continue
            if isinstance(token, Type):
                # encountered a token that has already been parsed
                type.append(token)
                type_found = True
                continue
            if skip > 0:
                skip -= 1
                continue
            # just keep together anything that comes within <> or () brackets
            # template arguments will be parsed when/if instantiated
            if token == "(":
                type[-1] += " ("
                type_found = False
                nest1 += 1
            elif token == ")":
                type[-1] += " )"
                if nest1 == 0 and not nest2:
                    type_found = True
                # NOTE(review): ')' decrements nest2 while '(' increments
                # nest1 — looks asymmetric; verify against upstream.
                nest2 -= 1
            elif token == "<":
                type.append("<")
                type_found = False
                nest2 += 1
            elif token == ">":
                type[-1] += " >"
                nest2 -= 1
                if nest2 == 0 and not nest1:
                    type_found = True
            elif nest1 or nest2:
                # inside brackets: glue tokens onto the pending fragment
                type[-1] += " " + token
            # handle the @-annotation tags (directions, lengths, docs, ...)
            elif token[0] == "@":
                if token[1:] == "IN":
                    if tags_allowed:
                        self.meta.input = True
                    else:
                        raise ParserError("in/out tags not allowed on return value")
                elif token[1:] == "OUT":
                    if tags_allowed:
                        self.meta.output = True
                    else:
                        raise ParserError("in/out tags not allowed on return value")
                elif token[1:] == "LENGTH":
                    # next token is the length expression
                    self.meta.length = string[i + 1]
                    skip = 1
                    continue
                elif token[1:] == "MAXLENGTH":
                    if tags_allowed:
                        self.meta.maxlength = string[i + 1]
                    else:
                        raise ParserError("maxlength tag not allowed on return value")
                    skip = 1
                    continue
                elif token[1:] == "INTERFACE":
                    self.meta.interface = string[i + 1]
                    skip = 1
                elif token[1:] == "PROPERTY":
                    self.meta.is_property = True
                elif token[1:] == "BRIEF":
                    self.meta.brief = string[i + 1]
                    skip = 1
                elif token[1:] == "DETAILS":
                    self.meta.details = string[i + 1]
                    skip = 1
                elif token[1:] == "PARAM":
                    # "@PARAM name: text" — strip the trailing colon
                    par = string[i + 1]
                    if par.endswith(":"):
                        par = par[:-1]
                    self.meta.param[par] = string[i + 2]
                    skip = 2
                elif token[1:] == "RETVAL":
                    par = string[i + 1]
                    if par.endswith(":"):
                        par = par[:-1]
                    self.meta.retval[par] = string[i + 2]
                    skip = 2
                elif token[1:] == "DEPRECATED":
                    self.meta.is_deprecated = True
                elif token[1:] == "TEXT":
                    self.meta.text = "".join(string[i + 1])
                    skip = 1
                else:
                    raise ParserError("invalid tag: " + token)
            # skip C-style explicit struct
            elif token in ["struct", "class", "union"]:
                continue
            elif token in ["export"]: # skip
                continue
            # keep identifiers with scope operator together
            elif token == "::":
                if len(type) > 1:
                    type[-1] += "::"
                type_found = False
            # arrays are equivalent to pointers here, so make it uniform
            # disregard anything that's inside the brackets
            elif token == "[":
                array = True
            elif token == "]":
                array = False
                type.append("*")
            elif token in ["*", "&"]:
                type.append(token)
            elif token in ["const", "volatile", "constexpr"]:
                if token == "constexpr":
                    # constexpr implies const for our purposes
                    parent.specifiers.append("constexpr")
                    token = "const"
                # put qualifiers in order
                if "*" in type:
                    type.insert(type.index("*") + 1, token)
                elif "&" in type:
                    type.insert(type.index("&") + 1, token)
                else:
                    type.insert(1, token)
            # include valid specifiers
            elif token in valid_specifiers:
                parent.specifiers.append(token)
            elif not type_found and not array:
                # handle primitive type combinations...
                if isinstance(type[-1], str):
                    if (token in ["int"]) and (type[-1].split()[-1] in ["signed", "unsigned", "short", "long"]):
                        type[-1] += " " + token
                    elif (token in ["char", "short", "long"]) and (type[-1].split()[-1] in ["signed", "unsigned"]):
                        type[-1] += " " + token
                    elif (token in ["long", "double"]) and (type[-1].split()[-1] in ["long"]):
                        type[-1] += " " + token
                    # keep identifiers with scope operator together
                    elif type[-1].endswith("::"):
                        type[-1] += token
                    # keep together anything that comes within <> or () brackets
                    elif nest1 == 0 and nest2 == 0:
                        type.append(token)
                    else:
                        type[-1] += token
                # the type is complete unless another primitive keyword follows
                if ((i == len(string) - 1) or (string[i + 1] not in ["char", "short", "long", "int", "double"])):
                    type_found = True
            elif type_found:
                # the first token after the type is the declared name
                if not array:
                    self.name = token

        if array:
            raise ParserError("unmatched bracket '['")

        type = type[1:]   # drop the "?" sentinel
        self.type = type

        # Normalize fundamental types
        if type and isinstance(type[-1], str):
            t = type[-1]
            if t.split()[-1] in ["char", "short", "long", "int", "double", "float", "signed", "unsigned"]:
                if "double" in t:
                    type[-1] = "long double" if "long" in t else "double"
                elif "float" in t:
                    type[-1] = "float"
                elif "long" in t:
                    if t.count("long") == 1:
                        type[-1] = "unsigned long" if "unsigned" in t else "signed long"
                    else:
                        type[-1] = "unsigned long long" if "unsigned" in t else "signed long long"
                elif "short" in t:
                    type[-1] = "unsigned short" if "unsigned" in t else "signed short"
                elif "char" in t:
                    type[-1] = "unsigned char" if "unsigned" in t else "signed char" if "signed" in t else "char"
                elif "int" in t or "signed" in t or "unsigned" in t:
                    type[-1] = "unsigned int" if "unsigned" in t else "signed int"

        # Try to match the type to an already defined class...
        self.ResolveIdentifiers(parent_block)

    def ResolveIdentifiers(self, parent):
        """Resolve the parsed token(s) in self.type into concrete Type objects.

        Walks the tree under global_namespace for user-defined names,
        instantiates templates, and folds */&/const/volatile markers into the
        resulting Type's ref flags.
        """
        if isinstance(parent, Method):
            parent = parent.parent
        if self.type:
            # depth-first lookup of T (by qualified-name suffix) in the tree
            def __Search(tree, found, T):
                qualifiedT = "::" + T
                # need full qualification if the class is a subclass
                if tree.full_name.startswith(parent.full_name + "::"):
                    if T.count("::") != tree.full_name.replace(parent.full_name, "").count("::"):
                        return
                enum_match = [e for e in tree.enums if e.full_name.endswith(qualifiedT)]
                typedef_match = [td for td in tree.typedefs if td.full_name.endswith(qualifiedT)]
                class_match = [cl for cl in tree.classes if cl.full_name.endswith(qualifiedT)]
                enumval_match = []
                for en in tree.enums:
                    enumval_match += ([e for e in en.items if e.full_name.endswith(qualifiedT)])
                template_match = []
                if isinstance(tree, TemplateClass):
                    template_match = [t for t in tree.parameters if t.full_name.endswith(qualifiedT)]
                found += enum_match + typedef_match + class_match + template_match + enumval_match
                if isinstance(tree, (Namespace, Class)):
                    for c in tree.classes:
                        __Search(c, found, T)
                if isinstance(tree, Namespace):
                    for n in tree.namespaces:
                        __Search(n, found, T)

            # find the type to scan for: walk back over trailing decorations,
            # accumulating them as Ref flags
            typeIdx = len(self.type) - 1
            cnt = 0
            ref = 0
            while self.type[typeIdx] in ["*", "&", "&&", "const", "volatile"]:
                if self.type[typeIdx] == "*":
                    ref |= Ref.POINTER
                elif self.type[typeIdx] == "&":
                    ref |= Ref.REFERENCE
                elif self.type[typeIdx] == "&&":
                    ref |= Ref.RVALUE_REFERENCE
                elif self.type[typeIdx] == "const":
                    # const after '*' qualifies the pointer itself
                    ref |= Ref.CONST_POINTER
                elif self.type[typeIdx] == "volatile":
                    ref |= Ref.VOLATILE_POINTER
                typeIdx -= 1

            # Skip template parsing here
            if isinstance(self.type[typeIdx], str):
                if self.type[typeIdx][0] == "<":
                    typeIdx -= 1

            if isinstance(self.type[typeIdx], str):
                i = typeIdx
                # decide by the last word of the spelling
                type = self.type[i].split()[-1]
                if type in ["float", "double"]:
                    self.type[i] = Type(Float(self.type[i]))
                elif type in ["int", "char", "wchar_t", "char16_t", "char32_t", "short", "long", "signed", "unsigned",
                              "int8_t", "uint8_t", "int16_t", "uint16_t", "int32_t", "uint32_t", "int64_t", "uint64_t"]:
                    self.type[i] = Type(Integer(self.type[i]))
                elif type == "bool":
                    self.type[i] = Type(Bool())
                elif type == "void":
                    self.type[i] = Type(Void())
                elif type == "string":
                    self.type[i] = Type(String())
                elif type == "std::string":
                    self.type[i] = Type(String(True))
                elif type == "__stubgen_integer":
                    self.type[i] = Type(BuiltinInteger(True))
                elif type == "__stubgen_unspecified_integer":
                    self.type[i] = Type(BuiltinInteger(False))
                else:
                    # not a fundamental/intrinsic: look it up in the tree
                    found = []
                    __Search(global_namespace, found, self.type[i])
                    if found:
                        # take closest match
                        found = found[-1]
                        if isinstance(found, TemplateClass):
                            # if we're pointing to a class template, then let's instantiate it!
                            self.type[i] = Type(found.Instantiate(self.type[i + 1], parent))
                            del self.type[i + 1]
                        else:
                            self.type[i] = found if isinstance(found, TemplateTypeParameter) else Type(found)

            if isinstance(self.type[typeIdx], Type):
                self.type[typeIdx].ref = ref

            if isinstance(self.type[typeIdx], Type):
                # leading const/volatile qualify the pointed-to/base type
                for i in range(len(self.type) - cnt - 1):
                    if self.type[i] == "const":
                        self.type[typeIdx].ref |= Ref.CONST
                    elif self.type[i] == "volatile":
                        self.type[typeIdx].ref |= Ref.VOLATILE
                # collapse the list to the single resolved Type
                self.type = self.type[typeIdx]

    def __str__(self):
        return str(self.type) if self.type else ""

    def __repr__(self):
        return str(self)

    def Type(self):
        return self.type

    def Proto(self):
        return str(self.Type())
def Evaluate(identifiers_):
    """Best-effort constant evaluation of a token list.

    Re-joins scoped identifiers, substitutes known variables/enumerators from
    the parse tree, then tries to eval() the resulting arithmetic expression.
    Returns the computed value, or the (partially substituted) token list when
    evaluation fails.
    """
    # Ensure scoped identifiers are kept together
    identifiers = ["?"]   # sentinel for safe identifiers[-1]
    for i, id in enumerate(identifiers_):
        if id == "::" or identifiers[-1].endswith("::"):
            identifiers[-1] += id
        else:
            identifiers.append(id)
    del identifiers[0]

    val = []
    if identifiers:
        for identifier in identifiers:
            # NOTE(review): this self-assignment is a no-op (leftover?).
            try:
                identifier = identifier
            except:
                pass
            try:
                # plain numeric literal (decimal or 0x-prefixed hex)
                val.append(str(int(identifier, 16 if identifier[:2] == "0x" else 10)))
            except:
                # not a number: look it up as a variable/enumerator/template arg
                def __Search(tree, found, T):
                    var_match = [v for v in tree.vars if v.full_name.endswith(T)]
                    enumerator_match = []
                    for e in tree.enums:
                        enumerator_match += [item for item in e.items if item.full_name.endswith(T)]
                    template_match = []
                    if (isinstance(tree, TemplateClass)):
                        template_match = [t for t in tree.arguments if t.full_name.endswith(T)]
                    found += var_match + enumerator_match + template_match
                    if isinstance(tree, (Namespace, Class)):
                        for c in tree.classes:
                            __Search(c, found, T)
                    if isinstance(tree, Namespace):
                        for n in tree.namespaces:
                            __Search(n, found, T)
                found = []
                __Search(global_namespace, found, "::" + identifier)
                if found:
                    val.append(found[-1])
                else:
                    val.append(str(identifier))

    if not val:
        val = identifiers

    value = None
    # attempt to parse the arithmetics...
    # NOTE: eval() runs on header-derived text; acceptable only because the
    # input is trusted source code, not external data.
    try:
        x = [str(v.value) if (isinstance(v, (Variable, Enumerator)) and v.value) else str(v) for v in val]
        value = eval("".join(x))
    except:
        try:
            value = eval("".join(val))
        except:
            # give up: hand back the token list for the caller to render
            value = val
    return value
# Holds a name
class Name:
    """A named entity carrying a fully-qualified scope path (full_name)."""

    def __init__(self, parent_block, name=""):
        if name:
            ASSERT_ISVALID(name)
        self.parent = parent_block
        # Anonymous entities still need a unique identity within the tree.
        fallback = "__unnamed_%s_%s" % (self.__class__.__name__.lower(), uuid.uuid4().hex[:8])
        prefix = "" if self.parent == None else self.parent.full_name
        self.name = name if (name or self.parent == None) else fallback
        self.full_name = prefix + ("::" + self.name if self.name else "")
        self.parser_file = CurrentFile()
        self.parser_line = CurrentLine()

    def Name(self):
        return self.full_name

    def ShortName(self):
        return self.name
# Holds compound statements and composite types
class Block(Name):
    """A named scope that can contain variables, enums, typedefs, classes and unions."""

    def __init__(self, parent_block, name=""):
        Name.__init__(self, parent_block, name)
        # one empty collection per contained construct category
        for collection in ("vars", "enums", "typedefs", "classes", "unions"):
            setattr(self, collection, [])
        self.parser_file = CurrentFile()
        self.parser_line = CurrentLine()
# Holds namespaces
class Namespace(Block):
    """A C++ namespace: holds nested namespaces and free methods.

    Registers itself with its parent namespace; the global namespace is the
    only instance constructed with parent == None.
    """

    def __init__(self, parent_block, name=""):
        Block.__init__(self, parent_block, name)
        self.namespaces = []
        self.methods = []
        self.omit = False
        # was "self.stub = -False" (i.e. the integer 0); plain False is the
        # intended flag value, matching Class.stub
        self.stub = False
        if self.parent != None:    # case for global namespace
            if isinstance(self.parent, Namespace):
                self.parent.namespaces.append(self)
            else:
                raise ParserError("can't insert namespace '%s' into non-namespace block '%s'" %
                                  (self.name, self.parent.name))

    def Proto(self):
        return self.name

    def __str__(self):
        return "namespace " + self.Proto()

    def __repr__(self):
        # was "if self.Proto" (a bound method, always truthy); call it so an
        # unnamed root actually reports as the global namespace
        return self.__str__() if self.Proto() else "global namespace"
# Holds a generic type, wraps fundamental and user-defined types with references and pointers
class Type:
    """A base type plus const/volatile qualifiers and pointer/reference
    decorations, stored as Ref bit flags in self.ref."""

    def __init__(self, basetype):
        self.type = basetype
        self.ref = Ref.VALUE

    def IsConst(self):
        return self.ref & Ref.CONST != 0

    def IsVolatile(self):
        return self.ref & Ref.VOLATILE != 0

    def IsPointer(self):
        return self.ref & Ref.POINTER != 0

    def IsConstPointer(self):
        return self.ref & Ref.CONST_POINTER != 0

    def IsVolatilePointer(self):
        return self.ref & Ref.VOLATILE_POINTER != 0

    def IsReference(self):
        return self.ref & Ref.REFERENCE != 0

    def IsRvalueReference(self):
        # fixed: compare against 0 so this returns bool like its siblings
        # (previously returned the raw masked int)
        return self.ref & Ref.RVALUE_REFERENCE != 0

    def IsPointerToConst(self):
        return self.IsConst() and self.IsPointer()

    def IsConstPointerToConst(self):
        return self.IsConst() and self.IsConstPointer()

    def IsConstReference(self):
        return self.IsConst() and self.IsReference()

    def IsValue(self):
        return not self.IsPointer() and not self.IsReference() and not self.IsRvalueReference()

    def IsNonConstReference(self):
        return self.IsReference() and not self.IsConst()

    def IsNonConstPointer(self):
        return self.IsPointer() and not self.IsConst()

    def IsFundamental(self):
        return isinstance(self.type, Fundamental)

    def IsIntrinsic(self):
        return isinstance(self.type, Intrinsic)

    def IsClass(self):
        return isinstance(self.type, Class)

    def Type(self):
        return self.type

    def TypeName(self):
        return self.type.Proto()

    def CVString(self):
        """Return "const", "volatile", "const volatile" or ""."""
        # local renamed from 'str', which shadowed the builtin
        cv = "const" if self.IsConst() else ""
        cv += " " if self.IsConst() and self.IsVolatile() else ""
        cv += "volatile" if self.IsVolatile() else ""
        return cv

    def Proto(self):
        """Full C++ spelling of the decorated type."""
        _str = "const " if self.IsConst() else ""
        _str += self.TypeName()
        _str += "*" if self.IsPointer() else ""
        _str += "&" if self.IsReference() else "&&" if self.IsRvalueReference() else ""
        _str += " const" if self.IsConstPointer() else ""
        _str += " volatile" if self.IsVolatilePointer() else ""
        return _str

    def __str__(self):
        return self.Proto()

    def __repr__(self):
        return "type " + str(self)
def TypeStr(s):
    """Render *s* as type text; non-Type values are marked as undefined."""
    if isinstance(s, Type):
        return str(s)
    return str(Undefined(s, "/* undefined */ "))
def ValueStr(s):
    """Render a value: ints and strings verbatim, anything else flagged unparsable."""
    if isinstance(s, int):
        return str(s)
    if isinstance(s, str):
        return s
    return str(Undefined(s, "/* unparsable */ "))
# Holds typedef definition
class Typedef(Identifier, Name):
    """A typedef/alias; registers itself with the parent block's typedefs."""
    def __init__(self, parent_block, string):
        Identifier.__init__(self, parent_block, self, string, [])
        Name.__init__(self, parent_block, self.name)
        self.parent = parent_block
        self.parent.typedefs.append(self)
        self.is_event = False
        # Inherit the iterator marker from an enclosing class/typedef, if any.
        if isinstance(parent_block, (Class, Typedef)):
            self.is_iterator = parent_block.is_iterator
        else:
            self.is_iterator = False
    def Proto(self):
        return self.full_name
    def __str__(self):
        return "typedef %s %s" % (self.full_name, TypeStr(self.type))
    def __repr__(self):
        return "typedef %s [= %s]" % (self.full_name, TypeStr(self.type.type))
# Holds structs and classes
class Class(Identifier, Block):
    """A C++ class or struct; registers itself with the parent's class list."""
    def __init__(self, parent_block, name):
        Identifier.__init__(self, parent_block, self, [name], [])
        Block.__init__(self, parent_block, name)
        self.type = self.full_name
        self.specifiers = []
        self.methods = []
        self.classes = []
        self.ancestors = [] # parent classes
        self._current_access = "public"
        # stub-generator annotations, all off by default
        self.omit = self.stub = False
        self.is_json = self.is_event = False
        self.is_extended = self.is_iterator = False
        self.type_name = name
        self.parent.classes.append(self)
    def Proto(self):
        return self.full_name
    def __str__(self):
        return "class %s" % self.Proto()
    def __repr__(self):
        if self.ancestors:
            bases = ", ".join(str(a[0]) for a in self.ancestors)
            return "class %s [<- %s]" % (self.full_name, bases)
        return "class %s" % self.full_name
# Holds unions
class Union(Identifier, Block):
    """A C++ union; registers itself with the parent's union list."""
    def __init__(self, parent_block, name):
        Identifier.__init__(self, parent_block, self, [name], [])
        Block.__init__(self, parent_block, name)
        self.methods = []
        self.classes = []
        self._current_access = "public"
        self.omit = self.stub = False
        self.parent.unions.append(self)
    def __str__(self):
        return "union %s" % self.full_name
    def __repr__(self):
        return self.__str__()
# Holds enumeration blocks, including class enums
class Enum(Identifier, Block):
    """An enum/enum class; tracks items and the next auto-increment value."""
    def __init__(self, parent_block, name, is_scoped, type="int"):
        Identifier.__init__(self, parent_block, self, [type, name], [])
        Block.__init__(self, parent_block, name)
        self.items = []
        self.scoped = is_scoped
        self.parent.enums.append(self)
        self._last_value = 0 # used for auto-incrementation
    def Proto(self):
        return self.full_name
    def __str__(self):
        keyword = "enum class " if self.scoped else "enum "
        return keyword + "%s : %s" % (self.Proto(), TypeStr(self.type))
    def __repr__(self):
        return str(self)
    def SetValue(self, value):
        # the next auto-assigned enumerator continues right after this one
        self._last_value = value + 1
    def GetValue(self):
        return self._last_value
# Holds functions
class Function(Block, Name):
    def __init__(self, parent_block, name, ret_type, valid_specifiers=None):
        """Create a function node; registers itself with the parent's methods.

        FIX: 'valid_specifiers' previously defaulted to a shared mutable list
        (["static", "extern", "inline"]) handed straight to Identifier; a
        None sentinel yields a fresh list per call with identical contents.
        """
        if valid_specifiers is None:
            valid_specifiers = ["static", "extern", "inline"]
        self.specifiers = []
        # NOTE(review): when 'name' is falsy this reads self.name before any
        # visible assignment — presumably callers always supply a name; confirm.
        Block.__init__(self, parent_block, name if name else self.name)
        Name.__init__(self, parent_block, self.name)
        self.retval = Identifier(self, self, ret_type, valid_specifiers, False)
        self.omit = False
        self.stub = False
        self.parent.methods.append(self)
    def Proto(self):
        """Render the function prototype text."""
        _str = "static " if self.IsStatic() else ""
        _str += TypeStr(self.retval.type) if self.retval.type else ""
        _str += (" " if str(self.retval) else "") + self.name
        _str += "(%s)" % (", ".join([str(v) for v in self.vars]))
        return _str
    def __str__(self):
        return self.Proto()
    def __repr__(self):
        return "function %s" % (self.name)
# Holds variables and constants
class Variable(Identifier, Name):
    def __init__(self, parent_block, string, value=None, valid_specifiers=None):
        """Create a variable/constant node; registers itself with parent vars.

        FIX: both defaults were shared mutable lists; None sentinels keep the
        effective defaults ([] and ["static", "extern", "register"]) while
        avoiding cross-call aliasing.
        """
        if value is None:
            value = []
        if valid_specifiers is None:
            valid_specifiers = ["static", "extern", "register"]
        Identifier.__init__(self, parent_block, self, string, valid_specifiers)
        Name.__init__(self, parent_block, self.name)
        # empty initializer list means "no value"
        self.value = Evaluate(value) if value else None
        self.parent.vars.append(self)
    def Proto(self):
        return "%s %s" % (TypeStr(self.type), self.name)
    def __str__(self):
        return self.Proto()
    def __repr__(self):
        value = ValueStr(self.value) if self.value else None
        return "variable %s %s '%s'%s" % (str(self.specifiers), TypeStr(self.type), str(self.name),
                                          (" = " + value) if value else "")
class Parameter(Variable):
    """A function/method parameter; picks up its doxygen @param brief."""
    def __init__(self, parent_block, string, value=[], valid_specifiers=[]):
        Variable.__init__(self, parent_block, string, value, valid_specifiers)
        # attach the @param description parsed from the comment block, if any
        briefs = parent_block.retval.meta.param
        if self.name in briefs:
            self.meta.brief = briefs[self.name]
    def Proto(self):
        return TypeStr(self.type)
    def __str__(self):
        return "%s %s" % (self.Proto(), self.name)
    def __repr__(self):
        value = ValueStr(self.value) if self.value else None
        suffix = (" = " + value) if value else ""
        return "param %s '%s'%s" % (TypeStr(self.type), str(self.name), suffix)
# Holds class methods (member functions)
class Method(Function):
    """A member function; records access, qualifiers and specifiers."""
    def __init__(self, parent_block, name, ret_type):
        Function.__init__(self, parent_block, name, ret_type,
                          ["inline", "static", "virtual", "explicit", "constexpr", "friend"])
        self.access = self.parent._current_access
        self.qualifiers = []
    def IsVirtual(self):
        return "virtual" in self.specifiers
    def IsPureVirtual(self):
        return "pure-virtual" in self.specifiers
    def IsConst(self):
        return "const" in self.qualifiers
    def IsVolatile(self):
        return "volatile" in self.qualifiers
    def IsStatic(self):
        return "static" in self.specifiers
    def CVString(self):
        # "const", "volatile", "const volatile" or ""
        return " ".join(q for q in ("const", "volatile") if q in self.qualifiers)
    def Proto(self):
        """Render the method prototype text."""
        pieces = []
        if self.IsVirtual():
            pieces.append("virtual ")
        if self.IsStatic():
            pieces.append("static ")
        if self.retval.type:
            pieces.append(TypeStr(self.retval.type))
        if str(self.retval):
            pieces.append(" ")
        pieces.append(self.name)
        pieces.append("(%s)" % ", ".join(v.Proto() for v in self.vars))
        cv = self.CVString()
        if cv:
            pieces.append(" " + cv)
        if self.IsPureVirtual():
            pieces.append(" = 0")
        return "".join(pieces)
    def __str__(self):
        return self.Proto()
    def __repr__(self):
        cv = " " + self.CVString() if self.CVString() else ""
        args = ", ".join([str(v) for v in self.vars])
        return "method %s %s '%s' (%s)%s %s" % (self.access, TypeStr(self.type), self.name,
                                                args, cv, str(self.specifiers))
class Destructor(Method):
    """Method subclass marking a destructor (construction is inherited)."""
    def __repr__(self):
        return "destructor %s '%s' %s" % (self.access, self.name, str(self.specifiers))
# Holds member attributes and constants
class Attribute(Variable):
    """A class member variable/constant; records the access specifier."""
    def __init__(self, parent_block, string, value=[]):
        Variable.__init__(self, parent_block, string, value, ["static", "constexpr", "thread_local", "mutable"])
        self.access = self.parent._current_access
    def IsConst(self):
        return "const" in self.qualifiers
    def IsVolatile(self):
        return "volatile" in self.qualifiers
    def IsStatic(self):
        return "static" in self.specifiers
    def CVString(self):
        # "const", "volatile", "const volatile" or ""
        return " ".join(q for q in ("const", "volatile") if q in self.qualifiers)
    def Proto(self):
        return "%s %s" % (TypeStr(self.type), str(self.name))
    def __str__(self):
        value = ValueStr(self.value) if self.value else None
        suffix = (" = " + value) if value else ""
        return "%s %s" % (self.Proto(), suffix)
    def __repr__(self):
        value = ValueStr(self.value) if self.value else None
        suffix = (" = " + value) if value else ""
        return "attribute %s %s %s '%s'%s" % (self.access, str(self.specifiers), TypeStr(self.type),
                                              str(self.name), suffix)
# Holds enumeration items
class Enumerator(Identifier, Name):
    """A single enum item; auto-increments from the enum when no value given."""
    def __init__(self, parent_block, name, value=None, type=["int"]):
        # scoped enums own their items; unscoped ones leak them into the
        # surrounding scope
        enclosing = parent_block if parent_block.scoped else parent_block.parent
        Identifier.__init__(self, enclosing, self, [type, name], [])
        Name.__init__(self, enclosing, self.name)
        self.parent = parent_block
        if value == None:
            self.value = parent_block.GetValue()
            self.autoValue = True
        else:
            self.value = Evaluate(value)
            self.autoValue = False
        if isinstance(self.value, int):
            # keep the enum's auto-increment counter in sync
            self.parent.SetValue(self.value)
        self.parent.items.append(self)
    def Proto(self):
        return self.full_name
    def __str__(self):
        return "%s = %s" % (self.Proto(), ValueStr(self.value))
    def __repr__(self):
        return "enumerator %s '%s' = %s" % (TypeStr(self.type), str(self.full_name), ValueStr(self.value))
class TemplateNonTypeParameter(Variable):
    """A non-type template parameter (e.g. 'size_t N')."""
    def __init__(self, parent_block, string, index, value=[]):
        Variable.__init__(self, parent_block, string, [])
        self.index = index
        self.value = Evaluate(value) if value else None
        self.parent.arguments.append(self)
    def __repr__(self):
        return "non-type parameter %s '%s' [= %s]" % (TypeStr(self.type), self.name, str(self.value))
class TemplateTypeParameter(Name):
    """A type template parameter (e.g. 'typename T')."""
    def __init__(self, parent_block, string, index):
        Name.__init__(self, parent_block, string)
        self.index = index
        parent_block.parameters.append(self)
    def Proto(self):
        return self.name
    def __str__(self):
        return "typename " + self.Proto()
    def __repr__(self):
        return "type parameter " + self.Proto()
class InstantiatedTemplateClass(Class):
    """A concrete instantiation of a TemplateClass with fixed arguments."""
    def __init__(self, parent_block, name, params, args):
        # unique per-argument-combination name via a short SHA1 digest
        digest = hashlib.sha1("_".join(args).encode('utf-8')).hexdigest()[:8].upper()
        Class.__init__(self, parent_block, name + "Instance" + digest)
        self.baseName = Name(parent_block, name)
        self.params = params
        self.args = args
        self.resolvedArgs = [Identifier(parent_block, self, [x], []) for x in args]
        self.type = self.TypeName()
    def TypeName(self):
        rendered = []
        for arg in self.resolvedArgs:
            t = arg.type
            if isinstance(t, list):
                t = "".join(t)
            rendered.append(str(t))
        return "%s<%s>" % (self.baseName.full_name, ", ".join(rendered))
    def Proto(self):
        return self.TypeName()
    def __str__(self):
        bindings = [p.name + " = " + str(self.args[k]) for k, p in enumerate(self.params)]
        text = "template class %s<%s>" % (self.baseName.full_name, ", ".join(str(p) for p in self.params))
        return text + " [with %s]" % ", ".join(bindings)
    def __repr__(self):
        bindings = [p.name + " = " + str(self.args[k]) for k, p in enumerate(self.params)]
        return "%s [instance of %s [with %s]]" % (Class.__repr__(self), self.TypeName(), ", ".join(bindings))
class TemplateClass(Class):
    """A class template; 'Instantiate' produces InstantiatedTemplateClass nodes."""
    def ParseArguments(self, string):
        # extract the <...> list and split it on top-level commas
        groups = re.findall(r'<(?:[^<>]*|<[^<>]*>)*>', string)
        if groups:
            stringParams = [s.strip() for s in re.split(r',\s*(?![^<>]*\))', groups[0][1:-1].strip())]
            return stringParams
        else:
            return []
    def __init__(self, parent_block, name, params):
        Class.__init__(self, parent_block, name)
        self.parameters = []  # type parameters (filled in by TemplateTypeParameter)
        self.arguments = []   # non-type parameters (filled in by TemplateNonTypeParameter)
        self.paramList = []   # all parameters, in declaration order
        paramList = self.ParseArguments(params)
        for p in paramList:
            if "typename" in p or "class" in p:
                param = TemplateTypeParameter(self, p.split()[1], index=paramList.index(p))
            else:
                param = TemplateNonTypeParameter(self, p.split(), index=paramList.index(p))
            self.paramList.append(param)
    def Instantiate(self, arguments, parent):
        """Instantiate this template with the argument text (e.g. "<int, 4>")."""
        def _Substitute(identifier):
            # replace template parameters with the concrete argument strings
            if isinstance(identifier.type, list):
                for i, v in enumerate(identifier.type):
                    if isinstance(v, TemplateTypeParameter):
                        if v.name in paramDict:
                            identifier.type[i] = strArgs[paramDict[v.name].index]
                            identifier.ResolveIdentifiers(instance)
                        break
            if (isinstance(identifier, Enumerator) or isinstance(identifier, Variable)) and identifier.value:
                for i, v in enumerate(identifier.value):
                    if isinstance(v, TemplateNonTypeParameter):
                        identifier.value[i] = strArgs[argDict[v.name].index]
                        identifier.value = Evaluate(identifier.value)
                        break
        strArgs = self.ParseArguments(arguments)
        paramDict = dict(zip([x.name for x in self.parameters], self.parameters))
        argDict = dict(zip([x.name for x in self.arguments], self.arguments))
        instance = InstantiatedTemplateClass(self.parent, self.name, self.paramList, strArgs)
        instance.ancestors = self.ancestors
        instance.specifiers = self.specifiers
        instance.is_json = self.is_json
        instance.is_extended = self.is_extended
        instance.is_event = self.is_event
        instance.is_iterator = self.is_iterator
        # shallow-copy every member, substituting template parameters
        for t in self.typedefs:
            newTypedef = copy.copy(t)
            newTypedef.parent = instance
            newTypedef.type = copy.copy(t.type)
            _Substitute(newTypedef)
            instance.typedefs.append(newTypedef)
        for v in self.vars:
            newAttr = copy.copy(v)
            newAttr.parent = instance
            newAttr.type = copy.copy(v.type)
            newAttr.value = copy.copy(v.value)
            _Substitute(newAttr)
            # BUG FIX: copied attributes were appended to 'instance.typedefs';
            # they belong in the instance's variable list.
            instance.vars.append(newAttr)
        for e in self.enums:
            newEnum = copy.copy(e)
            newEnum.items = []
            newEnum.parent = instance
            _Substitute(newEnum)
            for i in e.items:
                newItem = copy.copy(i)
                newItem.type = copy.copy(i.type)
                newItem.value = copy.copy(i.value)
                _Substitute(newItem)
                newEnum.items.append(newItem)
            instance.enums.append(newEnum)
        for m in self.methods:
            newMethod = copy.copy(m)
            newMethod.vars = []
            newMethod.parent = instance
            if not isinstance(m, Destructor):
                newMethod.retval = copy.copy(m.retval)
                newMethod.retval.type = copy.copy(m.retval.type)
                _Substitute(newMethod.retval)
                for p in m.vars:
                    newVar = copy.copy(p)
                    newVar.type = copy.copy(p.type)
                    newVar.value = copy.copy(p.value)
                    _Substitute(newVar)
                    newMethod.vars.append(newVar)
            instance.methods.append(newMethod)
        return instance
    def __str__(self):
        return "template class %s<%s>" % (self.full_name, ", ".join([str(p) for p in self.paramList]))
    def __repr__(self):
        return "template %s<%s>" % (Class.__repr__(self), ", ".join([repr(p) for p in self.paramList]))
# -------------------------------------------------------------------------
# PRIVATE FUNCTIONS
# -------------------------------------------------------------------------
# Splits source file text into a list of tokens, removing comments and preprocessor directives.
def __Tokenize(contents):
    """Tokenize C++ text; emits code tokens plus @TAG markers parsed from comments."""
    global current_file
    global current_line
    # First pass: split into lines, join '\'-continuations and inject
    # "// @_line:" markers so tokens can be traced back to source lines.
    tokens = [s.strip() for s in re.split(r"([\r\n])", contents, flags=re.MULTILINE) if s]
    eoltokens = []
    line = 1
    inComment = 0
    for token in tokens:
        if token.startswith("// @_file:"):
            line = 1
        if token == '':
            if not inComment:
                eoltokens.append("// @_line:" + str(line) + " ")
            line = line + 1
        elif (len(eoltokens) > 1) and eoltokens[-2].endswith("\\"):
            del eoltokens[-1]
            eoltokens[-1] = eoltokens[-1][:-1] + token
        else:
            eoltokens.append(token)
        inComment += eoltokens[-1].count("/*") - eoltokens[-1].count("*/")
    contents = "\n".join(eoltokens)
    # Second pass: split into C++ tokens proper.
    formula = (
        r"(#if 0[\S\s]*?#endif)"
        r"|(#.*)" # preprocessor
        r"|(/\*[\S\s]*?\*/)" # multi-line comments
        r"|(//.*)" # single line comments
        r"|(\"[^\"]+\")" # double quotes
        r"|(\'[^\']+\')" # quotes
        r"|(::)|(==)|(!=)|(>=)|(<=)|(&&)|(\|\|)" # two-char operators
        r"|(\+\+)|(--)|(\+=)|(-=)|(/=)|(\*=)|(%=)|(^=)|(&=)|(\|=)|(~=)"
        r"|([,:;~!?=^/*%-\+&<>\{\}\(\)\[\]])" # single-char operators
        r"|([\r\n\t ])" # whitespace
    )
    tokens = [s.strip() for s in re.split(formula, contents, flags=(re.MULTILINE)) if s]
    tagtokens = []
    # check for special metadata within comments
    skipmode = False
    for token in tokens:
        if token:
            if skipmode:
                # in skip mode ignore everything until the next file marker
                if "@_file" in token:
                    skipmode = False
                else:
                    continue

            def __ParseLength(string, tag):
                # parse the (possibly parenthesized) argument of a
                # @length-style tag; returns its token list
                formula = (r"(\"[^\"]+\")"
                           r"|(\'[^\']+\')"
                           r"|(\*/)|(::)|(==)|(!=)|(>=)|(<=)|(&&)|(\|\|)"
                           r"|(\+\+)|(--)|(\+=)|(-=)|(/=)|(\*=)|(%=)|(^=)|(&=)|(\|=)|(~=)"
                           r"|([,:;~!?=^/*%-\+&<>\{\}\(\)\[\]])"
                           r"|([\r\n\t ])")
                tagtokens.append(tag.upper())
                length_str = string[string.index(tag) + len(tag):]
                length_tokens = [
                    s.strip() for s in re.split(formula, length_str, flags=re.MULTILINE)
                    if isinstance(s, str) and len(s.strip())
                ]
                if length_tokens[0] == ':':
                    length_tokens = length_tokens[1:]
                no_close_last = (length_tokens[0] == '(')
                tokens = []
                par_count = 0
                for t in length_tokens:
                    if t == '(':
                        if tokens:
                            tokens.append(t)
                        par_count += 1
                    elif t == ')':
                        par_count -= 1
                        if par_count == 0:
                            if not no_close_last:
                                tokens.append(t)
                            break
                        else:
                            tokens.append(t)
                    elif t == '*/' or t == "," or t[0] == '@':
                        break
                    else:
                        tokens.append(t)
                        if par_count == 0:
                            break
                if par_count != 0:
                    raise ParserError("unmatched parenthesis in %s expression" % tag)
                if len(tokens) == 0:
                    raise ParserError("invalid %s value" % tag)
                return tokens
            if ((token[:2] == "/*") and (token.count("/*") != token.count("*/"))):
                raise ParserError("multi-line comment not closed")
            if ((token[:2] == "/*") or (token[:2] == "//")):
                def _find(word, string):
                    return re.compile(r"[ \r\n/\*]({0})([: \r\n\*]|$)".format(word)).search(string) != None
                if _find("@stubgen", token):
                    if "@stubgen:skip" in token:
                        skipmode = True
                    elif "@stubgen:omit" in token:
                        tagtokens.append("@OMIT")
                    elif "@stubgen:stub" in token:
                        tagtokens.append("@STUB")
                    elif "@stubgen:include" in token:
                        pass # nothing to do here
                    else:
                        raise ParserError("invalid @stubgen tag")
                if _find("@stop", token):
                    # BUG FIX: previously assigned 'skipMode' (a dead,
                    # differently-cased local), so @stop never took effect.
                    skipmode = True
                if _find("@omit", token):
                    tagtokens.append("@OMIT")
                if _find("@stub", token):
                    tagtokens.append("@STUB")
                if _find("@in", token):
                    tagtokens.append("@IN")
                if _find("@out", token):
                    tagtokens.append("@OUT")
                if _find("@inout", token):
                    tagtokens.append("@IN")
                    tagtokens.append("@OUT")
                if _find("@property", token):
                    tagtokens.append("@PROPERTY")
                if _find("@deprecated", token):
                    tagtokens.append("@DEPRECATED")
                if _find("@json", token):
                    tagtokens.append("@JSON")
                if _find("@event", token):
                    tagtokens.append("@EVENT")
                if _find("@extended", token):
                    tagtokens.append("@EXTENDED")
                if _find("@iterator", token):
                    tagtokens.append("@ITERATOR")
                if _find("@text", token):
                    tagtokens.append(__ParseLength(token, "@text"))
                if _find("@length", token):
                    tagtokens.append(__ParseLength(token, "@length"))
                if _find("@maxlength", token):
                    tagtokens.append(__ParseLength(token, "@maxlength"))
                if _find("@interface", token):
                    tagtokens.append(__ParseLength(token, "@interface"))
                def FindDoxyString(tag, hasParam, string, tagtokens):
                    # extract doxygen text following 'tag' up to the next
                    # tag or the end of the comment
                    def EndOfTag(string, start):
                        end_comment = string.find("*/", start)
                        next_tag = string.find("@", start)
                        end = None
                        if next_tag != -1 and end_comment != -1:
                            if next_tag < end_comment:
                                end = next_tag
                        elif end_comment != -1:
                            end = end_comment
                        return end
                    start = string.find(tag)
                    if (start != -1):
                        start += len(tag) + 1
                        desc = string[start:EndOfTag(token, start)].strip(" *\n")
                        if desc:
                            tagtokens.append(tag.upper())
                            if hasParam:
                                tagtokens.append(desc.split(" ",1)[0])
                                tagtokens.append(desc.split(" ",1)[1])
                            else:
                                tagtokens.append(desc)
                        # NOTE(review): 'string[start+1]' passes a single
                        # character, so the recursion always terminates after
                        # one match — 'string[start+1:]' may have been
                        # intended; confirm before changing.
                        FindDoxyString(tag, hasParam, string[start+1], tagtokens)
                FindDoxyString("@brief", False, token, tagtokens)
                FindDoxyString("@details", False, token, tagtokens)
                FindDoxyString("@param", True, token, tagtokens)
                FindDoxyString("@retval", True, token, tagtokens)
                if _find("@_file", token):
                    idx = token.index("@_file:") + 7
                    tagtokens.append("@FILE:" + token[idx:])
                    current_file = token[idx:]
                if _find("@_line", token):
                    idx = token.index("@_line:") + 7
                    # collapse consecutive line markers into the latest one
                    if len(tagtokens) and not isinstance(tagtokens[-1],list) and tagtokens[-1].startswith("@LINE:"):
                        del tagtokens[-1]
                    current_line = int(token[idx:].split()[0])
                    tagtokens.append("@LINE:" + token[idx:])
            elif len(token) > 0 and token[0] != '#' and token != "EXTERNAL":
                tagtokens.append(token)
    tagtokens.append(";") # prevent potential out-of-range errors
    return tagtokens
# -------------------------------------------------------------------------
# EXPORTED FUNCTIONS
# -------------------------------------------------------------------------
# Global parser state (module-level so CurrentFile()/CurrentLine() can
# report error locations from anywhere).
i = 0                        # index into 'tokens' during Parse()
tokens = []                  # flat token stream produced by __Tokenize()
line_numbers = []            # source line per token (parallel to 'tokens')
files = []                   # source file per token (parallel to 'tokens')
current_line = 0             # line being handled during preprocessing
current_file = "undefined"   # file being handled during preprocessing
def CurrentFile():
    """Best-known source file name for error reporting."""
    # While Parse() runs, 'i' points into the token stream; before that
    # (during preprocessing) fall back to the file being tokenized.
    return files[i] if i > 0 else current_file
def CurrentLine():
    """Best-known source line number for error reporting."""
    # While Parse() runs, 'i' points into the token stream; before that
    # (during preprocessing) fall back to the line being tokenized.
    return line_numbers[i] if i > 0 else current_line
# Builds a syntax tree (data structures only) of C++ source code
def Parse(contents):
    """Parse preprocessed C++ text into a tree of Namespace/Class/Enum/... nodes.

    Returns the (re-created) global namespace object. Parsing state lives in
    module-level globals so CurrentFile()/CurrentLine() can report locations.
    """
    # Start in global namespace.
    global global_namespace
    global current_file
    global tokens
    global line_numbers
    global files
    global i
    # NOTE(review): 'current_line' is assigned below but not declared global,
    # so this shadows the module-level variable — confirm that is intended.
    i = 0
    tokens = []
    line_numbers = []
    files = []
    line_tokens = []
    current_line = 0
    current_file = "undefined"
    # Split into tokens first
    line_tokens = __Tokenize(contents)
    # Fold @LINE/@FILE markers into per-token location tables.
    for token in line_tokens:
        if isinstance(token, str) and token.startswith("@LINE:"):
            current_line = int(token[6:].split()[0])
        elif isinstance(token, str) and token.startswith("@FILE:"):
            current_file = token[6:]
            tokens.append("@GLOBAL")
            line_numbers.append(current_line)
            files.append(current_file)
        else:
            tokens.append(token)
            line_numbers.append(current_line)
            files.append(current_file)
    global_namespace = Namespace(None)
    current_block = [global_namespace]  # stack of open scopes
    next_block = None                   # scope opened by the next '{'
    last_template_def = []              # tokens of a pending template<...> header
    min_index = 0
    # one-shot flags set by @TAG markers, applied to the next declaration
    omit_next = False
    stub_next = False
    json_next = False
    event_next = False
    extended_next = False
    iterator_next = False
    in_typedef = False
    # Main loop.
    while i < len(tokens):
        # Handle special tokens
        if not isinstance(tokens[i], str):
            i += 1
            continue
        if tokens[i] == "@OMIT":
            omit_next = True
            tokens[i] = ";"
            i += 1
        elif tokens[i] == "@STUB":
            stub_next = True
            tokens[i] = ";"
            i += 1
        elif tokens[i] == "@JSON":
            json_next = True
            tokens[i] = ";"
            i += 1
        elif tokens[i] == "@EVENT":
            event_next = True
            tokens[i] = ";"
            i += 1
        elif tokens[i] == "@EXTENDED":
            extended_next = True
            tokens[i] = ";"
            i += 1
        elif tokens[i] == "@ITERATOR":
            iterator_next = True
            tokens[i] = ";"
            i += 1
        elif tokens[i] == "@GLOBAL":
            # a new input file starts: reset all per-file state
            current_block = [global_namespace]
            next_block = None
            last_template_def = []
            min_index = 0
            omit_next = False
            stub_next = False
            json_next = False
            event_next = False
            extended_next = False
            iterator_next = False
            in_typedef = False
            tokens[i] = ";"
            i += 1
        # Swallow template definitions
        elif tokens[i] == "template" and tokens[i + 1] == '<':
            s = i
            i += 1
            nest = 0
            while True:
                if tokens[i] == ">":
                    if nest == 1:
                        break
                    nest -= 1
                elif tokens[i] == "<":
                    nest += 1
                i += 1
            i += 1
            last_template_def = tokens[s:i]
            min_index = i
        # Parse namespace definition...
        elif tokens[i] == "namespace":
            namespace_name = ""
            if is_valid(tokens[i + 1]): # is there a namespace name?
                namespace_name = tokens[i + 1]
                i += 1
            next_block = Namespace(current_block[-1], namespace_name)
            i += 1
        # Parse type alias...
        elif isinstance(current_block[-1], (Namespace, Class)) and tokens[i] == "typedef":
            j = i + 1
            while tokens[j] != ";":
                j += 1
            typedef = Typedef(current_block[-1], tokens[i + 1:j])
            if event_next:
                typedef.is_event = True
                event_next = False
            if not isinstance(typedef.type, Type) and typedef.type[0] == "enum":
                # typedef'd enum: the enum body follows, remember to patch
                # the typedef's type once the enum block is closed
                in_typedef = True
                i += 1
            else:
                i = j + 1
        # Parse "using"...
        elif isinstance(current_block[-1], (Namespace, Class)) and tokens[i] == "using":
            if tokens[i + 1] != "namespace" and tokens[i + 2] == "=":
                i += 2
                j = i + 1
                while tokens[j] != ";":
                    j += 1
                # reuse typedef class but correct name accordingly
                if not current_block[-1].omit:
                    typedef = Typedef(current_block[-1], tokens[i + 1:j])
                    if event_next:
                        typedef.is_event = True
                        event_next = False
                    typedef_id = Name(current_block[-1], tokens[i - 1])
                    typedef.name = typedef_id.name
                    typedef.full_name = typedef_id.full_name
                i = j + 1
            elif tokens[i + 1] != "namespace" and tokens[i + 2] != "=":
                if not current_block[-1].omit:
                    raise ParserError("using-declarations are not supported")
            elif tokens[i + 1] == "namespace":
                if not current_block[-1].omit:
                    raise ParserError("'using namespace' directives are not supported")
        # Parse class definition...
        elif (tokens[i] == "class") or (tokens[i] == "struct") or (tokens[i] == "union"):
            name = tokens[i + 1]
            if tokens[i] == "union":
                new_class = Union(current_block[-1], name)
            else:
                new_class = None
                # Look up previous classes in case there already was a forward declaration of this class
                for idx, cl in enumerate(current_block[-1].classes):
                    if cl.name == name:
                        if len(cl.methods) == 0:
                            new_class = cl
                            # move the class to to bottom
                            current_block[-1].classes.append(new_class)
                            del current_block[-1].classes[idx]
                        else:
                            raise ParserError("duplicate class name: " + cl.name)
                        break
                if new_class == None:
                    if last_template_def:
                        new_class = TemplateClass(current_block[-1], name, " ".join(last_template_def))
                        last_template_def = []
                    else:
                        new_class = Class(current_block[-1], name)
                new_class._current_access = "private" if tokens[i] == "class" else "public"
            # apply any pending one-shot @TAG annotations
            if omit_next:
                new_class.omit = True
                omit_next = False
            elif stub_next:
                new_class.stub = True
                stub_next = False
            if json_next:
                new_class.is_json = True
                new_class.is_extended = extended_next
                json_next = False
                extended_next = False
            if event_next:
                new_class.is_event = True
                new_class.is_extended = extended_next
                event_next = False
                extended_next = False
            if iterator_next:
                new_class.is_iterator = True
                event_next = False
            if last_template_def:
                new_class.specifiers.append(" ".join(last_template_def))
                last_template_def = []
            i += 1
            if tokens[i + 1] == "final":
                # NOTE(review): this appends tokens[i + 2], the token AFTER
                # "final" — tokens[i + 1] looks intended; confirm upstream.
                new_class.specifiers.append(tokens[i + 2])
                i += 1
            # parse class ancestors...
            # TODO: refactor!!
            if tokens[i + 1] == ':':
                i += 1
                parent_class = ""
                parent_access = "private"
                specifiers = []
                while True:
                    if tokens[i + 1] in ['{', ',']:
                        # try to find a reference to an already found type
                        parent_ref = Identifier(current_block[-1], current_block[-1], [parent_class], [])
                        new_class.ancestors.append([parent_ref.type, parent_access, specifiers])
                        parent_access = "private"
                        if tokens[i + 1] == '{':
                            break
                    elif tokens[i + 1] in ["public", "private", "protected"]:
                        parent_access = tokens[i + 1]
                    elif tokens[i + 1] == "virtual":
                        specifiers.append(tokens[i + 1])
                    else:
                        parent_class += tokens[i + 1]
                    i += 1
                i += 1
            if tokens[i] == ';':
                i += 1
            else:
                next_block = new_class
        # Parse enum definition...
        elif isinstance(current_block[-1], (Namespace, Class)) and tokens[i] == "enum":
            enum_name = ""
            enum_type = "int"
            is_scoped = False
            if (tokens[i + 1] == "class") or (tokens[i + 1] == "struct"):
                is_scoped = True
                i += 1
            if is_valid(tokens[i + 1]): # enum name given?
                enum_name = tokens[i + 1]
                i += 1
            if tokens[i + 1] == ':':
                enum_type = tokens[i + 2]
                i += 2
            new_enum = Enum(current_block[-1], enum_name, is_scoped, enum_type)
            next_block = new_enum
            i += 1
        # Parse class access specifier...
        elif isinstance(current_block[-1], Class) and tokens[i] == ':':
            current_block[-1]._current_access = tokens[i - 1]
            ASSERT_ISEXPECTED(current_block[-1]._current_access, ["private", "protected", "public"])
            i += 1
        # Parse function/method definition...
        elif isinstance(current_block[-1], (Namespace, Class)) and tokens[i] == "(":
            # concatenate tokens to handle operators and destructors
            j = i - 1
            k = i - 1
            if isinstance(current_block[-1], Class) and (tokens[i - 2] == "operator"):
                name = "operator" + tokens[i - 1]
                j -= 1
                k -= 1
            else:
                name = tokens[i - 1]
                if tokens[i - 2] == '~':
                    name = "~" + name #dtor
                    j -= 1
                    k -= 1
            # locate return value
            while j >= min_index and tokens[j] not in ['{', '}', ';', ':']:
                j -= 1
            if not current_block[-1].omit and not omit_next:
                ret_type = tokens[j + 1:k]
            else:
                ret_type = []
            if isinstance(current_block[-1], Class):
                if name[0] == "~":
                    method = Destructor(current_block[-1], name, ret_type)
                else:
                    method = Method(current_block[-1], name, ret_type)
            else:
                method = Function(current_block[-1], name, ret_type)
            # apply pending one-shot tags, or inherit from the parent scope
            if omit_next:
                method.omit = True
                omit_next = False
            elif method.parent.omit:
                method.omit = True
            elif stub_next:
                method.stub = True
                stub_next = False
            elif method.parent.stub:
                method.stub = True
            if last_template_def:
                method.specifiers.append(" ".join(last_template_def))
                last_template_def = []
            # try to detect a function/macro call
            function_call = not ret_type and ((name != current_block[-1].name) and (name !=
                                                                                    ("~" + current_block[-1].name)))
            # parse method parameters...
            j = i
            nest = 0
            nest2 = 0
            while tokens[i] != ')':
                # advance j to the end of the current parameter (top-level
                # ',' or the closing ')'), tracking () and <> nesting
                while tokens[j]:
                    if tokens[j] == '(':
                        nest += 1
                    elif tokens[j] == ')':
                        nest -= 1
                        if nest == 0 and nest2 == 0:
                            break
                    if tokens[j] == '<':
                        nest2 += 1
                    elif tokens[j] == '>':
                        nest2 -= 1
                    elif tokens[j] == ',' and nest == 1 and nest2 == 0:
                        break
                    j += 1
                param = tokens[i + 1:j]
                if len(param) and not (len(param) == 1 and param[0] == "void"): # remove C-style f(void)
                    value = []
                    if '=' in param:
                        assignment = param.index('=')
                        value = param[assignment + 1:]
                        param = param[0:assignment]
                    if not current_block[-1].omit and not method.omit:
                        Parameter(method, param, value)
                i = j
                j += 1
            if nest:
                raise ParserError("unmatched parenthesis '('")
            if nest2:
                raise ParserError("unmatched parenthesis '<'")
            # parse post-declaration qualifiers/specifiers...
            if isinstance(current_block[-1], Class):
                while tokens[i] not in [';', '{', ':']:
                    # const, volatile
                    if tokens[i] in ["const", "volatile"]:
                        method.qualifiers.append(tokens[i])
                    # handle pure virtual methods
                    elif (tokens[i] == "="):
                        if tokens[i + 1] == "0" and "virtual" in method.specifiers: # mark the virtual function as pure
                            method.specifiers.append("pure-virtual")
                        elif tokens[i + 1] in ["default", "delete"]:
                            method.specifiers.append(tokens[i + 1])
                        i += 1
                    elif tokens[i] in ["override", "final", "noexcept"]:
                        method.specifiers.append(tokens[i])
                    i += 1
            if function_call: # it was apparently a function call and not declaration, so remove it
                current_block[-1].methods.pop()
            else:
                next_block = method
            if tokens[i] == ';':
                i += 1
            elif tokens[i] == ':': # skip ctor initializers
                while tokens[i] != '{':
                    i += 1
        # Handle opening a compound block or a composite type
        elif tokens[i] == '{':
            current_block.append(next_block)
            i += 1
        # Handle closing a compound block/composite type
        elif tokens[i] == '}':
            if isinstance(current_block[-1], Class) and (tokens[i + 1] != ';'):
                raise ParserError("definitions following a class declaration is not supported (%s)" %
                                  current_block[-1].full_name)
            if len(current_block) > 1:
                current_block.pop()
            else:
                raise ParserError("unmatched brace '{'")
            i += 1
            next_block = Block(current_block[-1]) # new anonymous scope
        # Parse variables and member attributes
        elif isinstance(current_block[-1],
                        (Namespace, Class)) and tokens[i] == ';' and (is_valid(tokens[i - 1]) or tokens[i - 1] == "]"):
            j = i - 1
            while j >= min_index and tokens[j] not in ['{', '}', ';', ":"]:
                j -= 1
            if not current_block[-1].omit:
                if isinstance(current_block[-1], Class):
                    Attribute(current_block[-1], tokens[j + 1:i])
                else:
                    Variable(current_block[-1], tokens[j + 1:i])
            i += 1
        # Parse constants and member constants
        elif isinstance(current_block[-1], (Namespace, Class)) and (tokens[i] == '=') and (tokens[i - 1] != "operator"):
            j = i - 1
            k = i + 1
            while tokens[j] not in ['{', '}', ';', ":"]:
                j -= 1
            while tokens[k] != ';':
                k += 1
            if not current_block[-1].omit:
                if isinstance(current_block[-1], Class):
                    Attribute(current_block[-1], tokens[j + 1:i], tokens[i + 1:k])
                else:
                    Variable(current_block[-1], tokens[j + 1:i], tokens[i + 1:k])
            i = k
        # Parse an enum block...
        elif isinstance(current_block[-1], Enum):
            enum = current_block[-1]
            j = i
            while True:
                if tokens[i] in ['}', ',']:
                    Enumerator(enum, tokens[j], tokens[j + 2:i] if tokens[j + 1] == '=' else None, enum.type)
                    if tokens[i + 1] == '}':
                        i += 1 # handle ,} situation
                        break
                    elif tokens[i] == '}':
                        break
                    else:
                        j = i + 1
                i += 1
            if in_typedef:
                # patch the pending typedef to point at the finished enum
                current_block[-2].typedefs[-1].type = Type(enum)
                in_typedef = False
        else:
            i += 1
    return global_namespace
# -------------------------------------------------------------------------
def ReadFile(source_file, includePaths, quiet=False, initial=""):
    """Read a source file, recursively expanding include directives.

    Recognizes '@stubgen:include' and '@insert' markers inside the file:
    a quoted form ("file") is resolved relative to the including file's
    directory, an angle form (<file>) is searched in *includePaths*.
    Returns *initial* plus the expanded content; each file's own content is
    preceded by a '// @_file:<path>' marker line so later stages can report
    the originating file.  Raises LoaderError on include problems; on a
    missing top file returns "" when *quiet*, else raises LoaderError.
    """
    contents = initial
    global current_file  # NOTE(review): defined elsewhere in this file; tracks the file being loaded for error reporting — confirm
    try:
        with open(source_file) as file:
            file_content = file.read()
            pos = 0
            while True:
                # locate the next include directive (either spelling)
                idx = file_content.find("@stubgen:include", pos)
                if idx == -1:
                    idx = file_content.find("@insert", pos)
                if idx != -1:
                    pos = idx + 1
                    # quoted form: path relative to the including file
                    match = re.search(r' \"(.+?)\"', file_content[idx:])
                    if match:
                        if match.group(1) != os.path.basename(os.path.realpath(source_file)):
                            tryPath = os.path.join(os.path.dirname(os.path.realpath(source_file)), match.group(1))
                            if os.path.isfile(tryPath):
                                prev = current_file
                                current_file = source_file
                                # NOTE(review): passing 'contents' as 'initial' makes the recursive call
                                # return it again, so '+=' re-appends text already in 'contents' — confirm intended
                                contents += ReadFile(tryPath, includePaths, False, contents)
                                current_file = prev
                            else:
                                raise LoaderError(source_file, "can't include '%s', file does not exist" % tryPath)
                        else:
                            raise LoaderError(source_file, "can't recursively include self")
                    else:
                        # angle form: search the configured include paths
                        match = re.search(r' <(.+?)>', file_content[idx:])
                        if match:
                            found = False
                            for ipath in includePaths:
                                tryPath = os.path.join(ipath, match.group(1))
                                if os.path.isfile(tryPath):
                                    prev = current_file
                                    current_file = source_file
                                    contents += ReadFile(tryPath, includePaths, True, contents)
                                    current_file = prev
                                    found = True
                            if not found:
                                raise LoaderError(source_file, "can't find '%s' in any of the include paths" % match.group(1))
                        else:
                            raise LoaderError(source_file, "syntax error at '%s'" % source_file)
                else:
                    break
            contents += "// @_file:%s\n" % source_file
            contents += file_content
            return contents
    except FileNotFoundError:
        if not quiet:
            raise LoaderError(source_file, "failed to open file")
        return ""
def ParseFile(source_file, includePaths=None):
    """Load and parse a single C++ header file.

    :param source_file: path to the header file to read.
    :param includePaths: optional list of directories searched for <...> includes.
    :return: the parse result of the expanded file content.
    """
    # Avoid the mutable-default-argument pitfall of the original 'includePaths=[]'.
    contents = ReadFile(source_file, includePaths if includePaths is not None else [])
    return Parse(contents)
def ParseFiles(source_files, includePaths=None):
    """Load and parse several C++ header files as one translation unit.

    A file name prefixed with '@' is loaded quietly (a missing file is not an
    error).  Empty entries are skipped.

    :param source_files: iterable of file paths (optionally '@'-prefixed).
    :param includePaths: optional list of directories searched for <...> includes.
    :return: the parse result of the concatenated, expanded contents.
    """
    # Avoid the mutable-default-argument pitfall of the original 'includePaths=[]'.
    if includePaths is None:
        includePaths = []
    contents = ""
    for source_file in source_files:
        if source_file:
            quiet = source_file.startswith("@")
            contents += ReadFile(source_file[1:] if quiet else source_file, includePaths, quiet, "")
    return Parse(contents)
# -------------------------------------------------------------------------
def DumpTree(tree, ind=0):
    """Recursively pretty-print the parsed AST to stdout, indenting by nesting level."""
    pad = ind * " "
    inner = (ind + 2) * " "
    if isinstance(tree, (Namespace, Class)):
        print(pad + str(tree))
        for typedef in tree.typedefs:
            print(inner + str(typedef))
        for enum in tree.enums:
            print(inner + str(enum))
            for enumerator in enum.items:
                print((ind + 4) * " " + str(enumerator))
        for variable in tree.vars:
            print(inner + str(variable))
        for method in tree.methods:
            print(inner + str(method))
        for inner_class in tree.classes:
            DumpTree(inner_class, ind + 2)
    if isinstance(tree, Namespace):
        for nested in tree.namespaces:
            DumpTree(nested, ind + 2)
# -------------------------------------------------------------------------
# entry point
if __name__ == "__main__":
    # Usage: <script> <header-file> [include-path ...]
    tree = ParseFile(sys.argv[1], sys.argv[2:])
    if isinstance(tree, Namespace):
        DumpTree(tree)
    else:
        # parsing did not yield a namespace tree; show whatever came back
        print(tree)
| 36.566582 | 153 | 0.491837 |
# following copyright and licenses apply:
#
# Copyright 2020 Metrological
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# C++ header parser
#
import re, uuid, sys, copy, hashlib, os
from collections import OrderedDict
from enum import IntEnum
class ParserError(RuntimeError):
    """Raised when the token stream cannot be parsed; message carries file/line context."""
    def __init__(self, msg):
        location = "%s(%s)" % (CurrentFile(), CurrentLine())
        super(ParserError, self).__init__("%s: parse error: %s" % (location, msg))
class LoaderError(RuntimeError):
    """Raised when a source file cannot be loaded or an include cannot be resolved."""
    def __init__(self, file, msg):
        super(LoaderError, self).__init__("%s: load error: %s" % (file, msg))
def is_valid(token):
    """Return a truthy value when *token* is a legal identifier or operator name."""
    if "operator" in token:
        # validate only the symbol part following the 'operator' keyword
        return re.match(r'^[-\+~<>=!%&^*/\|\[\]]+$', token[8:])
    has_valid_chars = re.match(r'^[a-zA-Z0-9_~]+$', token)
    return token and has_valid_chars and not token[0].isdigit()
def ASSERT_ISVALID(token):
    """Abort parsing when *token* is not a usable identifier."""
    if not is_valid(token):
        raise ParserError("invalid identifier: '" + token + "'")
    if token in ["alignas", "alignof"]:
        # valid identifiers syntactically, but unsupported by this parser
        raise ParserError("alignment specifiers are not supported")
def ASSERT_ISEXPECTED(token, list):
    """Abort parsing when *token* is not one of the identifiers in *list*."""
    if token not in list:
        message = "unexpected identifier: '%s', expected one of %s" % (token, str(list))
        raise ParserError(message)
# -------------------------------------------------------------------------
# CLASS DEFINITIONS
# -------------------------------------------------------------------------
global_namespace = None
class Ref(IntEnum):
    """Bit flags describing how a type is referenced; combined with bitwise OR."""
    # BUGFIX: the last four members had stray trailing commas, turning their
    # values into one-element tuples; they only resolved to the intended ints
    # via the Enum tuple-as-constructor-args protocol.  Use plain ints.
    VALUE = 1
    POINTER = 2
    REFERENCE = 4
    RVALUE_REFERENCE = 8
    CONST = 16
    VOLATILE = 32
    CONST_POINTER = 64
    VOLATILE_POINTER = 128
class Metadata:
    """Annotation tags (@brief, @in/@out, @param, ...) collected for one identifier."""
    def __init__(self):
        # documentation text
        self.brief = ""
        self.details = ""
        # parameter direction tags (@IN / @OUT)
        self.input = False
        self.output = False
        # behavioral tags
        self.is_property = False
        self.is_deprecated = False
        # buffer-length tags (@LENGTH / @MAXLENGTH); token strings, not ints
        self.length = None
        self.maxlength = None
        # @INTERFACE tag payload
        self.interface = None
        # @TEXT tag payload
        self.text = None
        # per-parameter and per-return-value descriptions, in declaration order
        self.param = OrderedDict()
        self.retval = OrderedDict()
class BaseType:
    """Common base for all parsed type representations; stores the type spelling."""
    def __init__(self, type):
        self.type = type
    def Proto(self):
        """Return the prototype (source) spelling of this type."""
        return self.type
    def __str__(self):
        return self.Proto()
class Undefined(BaseType):
    """Wraps a token (or token list) that could not be resolved to a known type."""
    def __init__(self, type, comment=""):
        BaseType.__init__(self, type)
        self.comment = comment
    def Proto(self):
        """Render the unresolved tokens back into a C++-looking spelling."""
        if not isinstance(self.type, list):
            return self.comment + str(self.type)
        if type(self.type[0]) is str:
            joined = " ".join(self.type)
            # tighten spacing around template/scope/pointer punctuation
            for old, new in ((" < ", "<"), (" :: ", "::"), (" >", ">"),
                             (" *", "*"), (" &", "&"), (" &&", "&&")):
                joined = joined.replace(old, new)
            return self.comment + joined
        return self.comment + " ".join([str(x) for x in self.type])
    def __repr__(self):
        return "undefined %s" % self.Proto()
class Fundamental(BaseType):
    """A C++ fundamental (built-in) type."""
    def __init__(self, type):
        BaseType.__init__(self, type)
    def __repr__(self):
        return "fundamental {}".format(self.type)
class Intrinsic(BaseType):
    """A type treated as intrinsic by the generator (e.g. string)."""
    def __init__(self, type):
        BaseType.__init__(self, type)
    def __repr__(self):
        return "intrinsic {}".format(self.type)
class BuiltinInteger(Intrinsic):
    """The generator's internal integer placeholder type."""
    def __init__(self, fixed_size=False):
        Intrinsic.__init__(self, "builtin_integer")
        self.fixed = fixed_size
    def IsFixed(self):
        """True when the integer has a fixed storage size."""
        return self.fixed
class String(Intrinsic):
    """A string type; either the project's 'string' alias or 'std::string'."""
    def __init__(self, std=False):
        Intrinsic.__init__(self, "std::string" if std else "string")
class Nullptr_t(Fundamental):
    """The std::nullptr_t fundamental type."""
    def __init__(self):
        Fundamental.__init__(self, "std::nullptr_t")
class Void(Fundamental):
    """The void fundamental type."""
    def __init__(self):
        Fundamental.__init__(self, "void")
class Bool(Fundamental):
    """The bool fundamental type."""
    def __init__(self):
        Fundamental.__init__(self, "bool")
class Integer(Fundamental):
    """A fundamental integer type; derives signedness and storage size from its spelling."""
    def __init__(self, string):
        Fundamental.__init__(self, string)
        self.signed = ("unsigned" not in self.type) and ("uint" not in self.type)
        if self.type == "char":
            self.size = "char"
        elif self.type == "wchar_t":
            self.size = "wchar"
        elif "8" in self.type:
            self.size = "char"
        elif "16" in self.type:
            self.size = "short"
        elif "32" in self.type:
            self.size = "long"
        elif "64" in self.type:
            self.size = "long long"
        else:
            # e.g. "signed long" -> "long": drop the leading signedness keyword
            self.size = " ".join(self.type.split()[1:])
    def IsFixed(self):
        """True unless the size is the platform-dependent plain 'int'."""
        return self.size != "int"
class Float(Fundamental):
    """A fundamental floating-point type (float, double, long double)."""
    def __init__(self, string):
        Fundamental.__init__(self, string)
# Holds identifier type
class Identifier():
    """Parses a declaration token list into (type tokens, name, value, metadata).

    This is the workhorse of the parser: __init__ consumes a flat token list
    (a declaration) left-to-right, classifying tokens into type parts,
    specifiers, @-annotation tags and finally the declared name;
    ResolveIdentifiers() then resolves the collected type spelling against
    fundamental/intrinsic types and the global AST.
    """
    def __init__(self, parent_block, parent, string, valid_specifiers, tags_allowed=True):
        self.parent = parent_block
        self.meta = Metadata()
        parent.specifiers = []
        self.name = ""
        type = ["?"] # indexing safety
        type_found = False
        nest1 = 0
        nest2 = 0
        array = False
        skip = 0
        self.value = []
        # reject constructs the generator cannot handle before scanning
        if string.count("*") > 1:
            raise ParserError("pointers to pointers are not supported: '%s'" % (" ".join(string)))
        elif string.count("[") > 1:
            raise ParserError("multi-dimensional arrays are not supported: '%s'" % (" ".join(string)))
        elif "[" in string and "*" in string:
            raise ParserError("arrays of pointers are not supported: '%s'" % (" ".join(string)))
        elif "&&" in string:
            raise ParserError("rvalue references are not supported: '%s'" % (" ".join(string)))
        for i, token in enumerate(string):
            if not token:
                continue
            if isinstance(token, Type):
                # encountered a token that has already been parsed
                type.append(token)
                type_found = True
                continue
            if skip > 0:
                skip -= 1
                continue
            # just keep together anything that comes within <> or () brackets
            # template arguments will be parsed when/if instantiated
            if token == "(":
                type[-1] += " ("
                type_found = False
                nest1 += 1
            elif token == ")":
                type[-1] += " )"
                if nest1 == 0 and not nest2:
                    type_found = True
                # NOTE(review): ')' decrements nest2 although '(' increments nest1 — confirm intended
                nest2 -= 1
            elif token == "<":
                type.append("<")
                type_found = False
                nest2 += 1
            elif token == ">":
                type[-1] += " >"
                nest2 -= 1
                if nest2 == 0 and not nest1:
                    type_found = True
            elif nest1 or nest2:
                type[-1] += " " + token
            # handle pointer/reference markers
            elif token[0] == "@":
                # @-tags carry annotation metadata; some consume following token(s) via 'skip'
                if token[1:] == "IN":
                    if tags_allowed:
                        self.meta.input = True
                    else:
                        raise ParserError("in/out tags not allowed on return value")
                elif token[1:] == "OUT":
                    if tags_allowed:
                        self.meta.output = True
                    else:
                        raise ParserError("in/out tags not allowed on return value")
                elif token[1:] == "LENGTH":
                    self.meta.length = string[i + 1]
                    skip = 1
                    continue
                elif token[1:] == "MAXLENGTH":
                    if tags_allowed:
                        self.meta.maxlength = string[i + 1]
                    else:
                        raise ParserError("maxlength tag not allowed on return value")
                    skip = 1
                    continue
                elif token[1:] == "INTERFACE":
                    self.meta.interface = string[i + 1]
                    skip = 1
                elif token[1:] == "PROPERTY":
                    self.meta.is_property = True
                elif token[1:] == "BRIEF":
                    self.meta.brief = string[i + 1]
                    skip = 1
                elif token[1:] == "DETAILS":
                    self.meta.details = string[i + 1]
                    skip = 1
                elif token[1:] == "PARAM":
                    par = string[i + 1]
                    if par.endswith(":"):
                        par = par[:-1]
                    self.meta.param[par] = string[i + 2]
                    skip = 2
                elif token[1:] == "RETVAL":
                    par = string[i + 1]
                    if par.endswith(":"):
                        par = par[:-1]
                    self.meta.retval[par] = string[i + 2]
                    skip = 2
                elif token[1:] == "DEPRECATED":
                    self.meta.is_deprecated = True
                elif token[1:] == "TEXT":
                    self.meta.text = "".join(string[i + 1])
                    skip = 1
                else:
                    raise ParserError("invalid tag: " + token)
            # skip C-style explicit struct
            elif token in ["struct", "class", "union"]:
                continue
            elif token in ["export"]: # skip
                continue
            # keep identifiers with scope operator together
            elif token == "::":
                if len(type) > 1:
                    type[-1] += "::"
                type_found = False
            # arrays are equivalent to pointers here, so make it uniform
            # disregard anything that's inside the brackets
            elif token == "[":
                array = True
            elif token == "]":
                array = False
                type.append("*")
            elif token in ["*", "&"]:
                type.append(token)
            elif token in ["const", "volatile", "constexpr"]:
                if token == "constexpr":
                    parent.specifiers.append("constexpr")
                    token = "const"
                # cv-qualifiers bind to the nearest */& if present, else to the base type
                if "*" in type:
                    type.insert(type.index("*") + 1, token)
                elif "&" in type:
                    type.insert(type.index("&") + 1, token)
                else:
                    type.insert(1, token)
            elif token in valid_specifiers:
                parent.specifiers.append(token)
            elif not type_found and not array:
                # accumulate multi-word fundamental spellings ("unsigned long long", ...)
                if isinstance(type[-1], str):
                    if (token in ["int"]) and (type[-1].split()[-1] in ["signed", "unsigned", "short", "long"]):
                        type[-1] += " " + token
                    elif (token in ["char", "short", "long"]) and (type[-1].split()[-1] in ["signed", "unsigned"]):
                        type[-1] += " " + token
                    elif (token in ["long", "double"]) and (type[-1].split()[-1] in ["long"]):
                        type[-1] += " " + token
                    elif type[-1].endswith("::"):
                        type[-1] += token
                    elif nest1 == 0 and nest2 == 0:
                        type.append(token)
                    else:
                        type[-1] += token
                if ((i == len(string) - 1) or (string[i + 1] not in ["char", "short", "long", "int", "double"])):
                    type_found = True
            elif type_found:
                if not array:
                    # first token after the type is the declared name
                    self.name = token
        if array:
            raise ParserError("unmatched bracket '['")
        type = type[1:]
        self.type = type
        # normalize multi-word fundamental spellings to a canonical form
        if type and isinstance(type[-1], str):
            t = type[-1]
            if t.split()[-1] in ["char", "short", "long", "int", "double", "float", "signed", "unsigned"]:
                if "double" in t:
                    type[-1] = "long double" if "long" in t else "double"
                elif "float" in t:
                    type[-1] = "float"
                elif "long" in t:
                    if t.count("long") == 1:
                        type[-1] = "unsigned long" if "unsigned" in t else "signed long"
                    else:
                        type[-1] = "unsigned long long" if "unsigned" in t else "signed long long"
                elif "short" in t:
                    type[-1] = "unsigned short" if "unsigned" in t else "signed short"
                elif "char" in t:
                    type[-1] = "unsigned char" if "unsigned" in t else "signed char" if "signed" in t else "char"
                elif "int" in t or "signed" in t or "unsigned" in t:
                    type[-1] = "unsigned int" if "unsigned" in t else "signed int"
        self.ResolveIdentifiers(parent_block)
    def ResolveIdentifiers(self, parent):
        """Resolve the collected type tokens to Type objects, folding in Ref flags."""
        if isinstance(parent, Method):
            parent = parent.parent
        if self.type:
            def __Search(tree, found, T):
                # depth-first search of the AST for any entity whose full name ends with ::T
                qualifiedT = "::" + T
                if tree.full_name.startswith(parent.full_name + "::"):
                    if T.count("::") != tree.full_name.replace(parent.full_name, "").count("::"):
                        return
                enum_match = [e for e in tree.enums if e.full_name.endswith(qualifiedT)]
                typedef_match = [td for td in tree.typedefs if td.full_name.endswith(qualifiedT)]
                class_match = [cl for cl in tree.classes if cl.full_name.endswith(qualifiedT)]
                enumval_match = []
                for en in tree.enums:
                    enumval_match += ([e for e in en.items if e.full_name.endswith(qualifiedT)])
                template_match = []
                if isinstance(tree, TemplateClass):
                    template_match = [t for t in tree.parameters if t.full_name.endswith(qualifiedT)]
                found += enum_match + typedef_match + class_match + template_match + enumval_match
                if isinstance(tree, (Namespace, Class)):
                    for c in tree.classes:
                        __Search(c, found, T)
                if isinstance(tree, Namespace):
                    for n in tree.namespaces:
                        __Search(n, found, T)
            # walk trailing */&/cv tokens right-to-left, accumulating Ref flags
            typeIdx = len(self.type) - 1
            cnt = 0
            ref = 0
            while self.type[typeIdx] in ["*", "&", "&&", "const", "volatile"]:
                if self.type[typeIdx] == "*":
                    ref |= Ref.POINTER
                elif self.type[typeIdx] == "&":
                    ref |= Ref.REFERENCE
                elif self.type[typeIdx] == "&&":
                    ref |= Ref.RVALUE_REFERENCE
                elif self.type[typeIdx] == "const":
                    ref |= Ref.CONST_POINTER
                elif self.type[typeIdx] == "volatile":
                    ref |= Ref.VOLATILE_POINTER
                typeIdx -= 1
            if isinstance(self.type[typeIdx], str):
                # skip over a collected template-argument token ("< ... >")
                if self.type[typeIdx][0] == "<":
                    typeIdx -= 1
            if isinstance(self.type[typeIdx], str):
                i = typeIdx
                type = self.type[i].split()[-1]
                if type in ["float", "double"]:
                    self.type[i] = Type(Float(self.type[i]))
                elif type in ["int", "char", "wchar_t", "char16_t", "char32_t", "short", "long", "signed", "unsigned",
                              "int8_t", "uint8_t", "int16_t", "uint16_t", "int32_t", "uint32_t", "int64_t", "uint64_t"]:
                    self.type[i] = Type(Integer(self.type[i]))
                elif type == "bool":
                    self.type[i] = Type(Bool())
                elif type == "void":
                    self.type[i] = Type(Void())
                elif type == "string":
                    self.type[i] = Type(String())
                elif type == "std::string":
                    self.type[i] = Type(String(True))
                elif type == "__stubgen_integer":
                    self.type[i] = Type(BuiltinInteger(True))
                elif type == "__stubgen_unspecified_integer":
                    self.type[i] = Type(BuiltinInteger(False))
                else:
                    # not a fundamental/intrinsic spelling: look it up in the AST
                    found = []
                    __Search(global_namespace, found, self.type[i])
                    if found:
                        found = found[-1]
                        if isinstance(found, TemplateClass):
                            # next token holds the template arguments; instantiate and consume it
                            self.type[i] = Type(found.Instantiate(self.type[i + 1], parent))
                            del self.type[i + 1]
                        else:
                            self.type[i] = found if isinstance(found, TemplateTypeParameter) else Type(found)
            if isinstance(self.type[typeIdx], Type):
                self.type[typeIdx].ref = ref
            if isinstance(self.type[typeIdx], Type):
                # leading const/volatile qualify the base type itself
                for i in range(len(self.type) - cnt - 1):
                    if self.type[i] == "const":
                        self.type[typeIdx].ref |= Ref.CONST
                    elif self.type[i] == "volatile":
                        self.type[typeIdx].ref |= Ref.VOLATILE
                self.type = self.type[typeIdx]
    def __str__(self):
        return str(self.type) if self.type else ""
    def __repr__(self):
        return str(self)
    def Type(self):
        return self.type
    def Proto(self):
        return str(self.Type())
def Evaluate(identifiers_):
    """Best-effort evaluation of a constant expression given as a token list.

    Tokens are first glued across '::' scope operators; each token is then
    resolved either as an integer literal (decimal or 0x-hex) or, failing
    that, looked up in the global AST (variables, enumerators, template
    arguments).  Finally the resolved sequence is eval()'d; when that is not
    possible the (partially resolved) token list is returned unchanged.

    NOTE: eval() is applied to header-file content — input must be trusted.
    """
    # Glue scope-qualified names back together: ["a", "::", "b"] -> ["a::b"]
    identifiers = ["?"]  # sentinel for safe [-1] access
    for tok in identifiers_:
        if tok == "::" or identifiers[-1].endswith("::"):
            identifiers[-1] += tok
        else:
            identifiers.append(tok)
    del identifiers[0]

    def __Search(tree, found, T):
        # collect variables/enumerators/template args whose full name ends with T
        var_match = [v for v in tree.vars if v.full_name.endswith(T)]
        enumerator_match = []
        for e in tree.enums:
            enumerator_match += [item for item in e.items if item.full_name.endswith(T)]
        template_match = []
        if isinstance(tree, TemplateClass):
            template_match = [t for t in tree.arguments if t.full_name.endswith(T)]
        found += var_match + enumerator_match + template_match
        if isinstance(tree, (Namespace, Class)):
            for c in tree.classes:
                __Search(c, found, T)
        if isinstance(tree, Namespace):
            for n in tree.namespaces:
                __Search(n, found, T)

    val = []
    for identifier in identifiers:
        try:
            # integer literal?
            val.append(str(int(identifier, 16 if identifier[:2] == "0x" else 10)))
        except (ValueError, TypeError):
            # otherwise resolve the name against the parsed tree
            found = []
            __Search(global_namespace, found, "::" + identifier)
            val.append(found[-1] if found else str(identifier))
    if not val:
        val = identifiers
    # Substitute known values for resolved entities and try to compute a result;
    # fall back to the raw eval, then to the unevaluated token list.
    try:
        x = [str(v.value) if (isinstance(v, (Variable, Enumerator)) and v.value) else str(v) for v in val]
        return eval("".join(x))
    except Exception:
        try:
            return eval("".join(val))
        except Exception:
            return val
class Name:
    """Assigns a (possibly auto-generated) name and fully-qualified name to an AST node."""
    def __init__(self, parent_block, name=""):
        if name:
            ASSERT_ISVALID(name)
        self.parent = parent_block
        # anonymous entities owned by a parent get a unique placeholder name
        placeholder = "__unnamed_" + self.__class__.__name__.lower() + "_" + uuid.uuid4().hex[:8]
        self.name = placeholder if (not name and self.parent != None) else name
        scope = "" if self.parent == None else self.parent.full_name
        self.full_name = scope + ("::" + self.name if self.name else "")
        self.parser_file = CurrentFile()
        self.parser_line = CurrentLine()
    def Name(self):
        """Return the fully-qualified name."""
        return self.full_name
    def ShortName(self):
        """Return the unqualified name."""
        return self.name
class Block(Name):
    """A named scope that can contain variables, enums, typedefs, classes and unions."""
    def __init__(self, parent_block, name=""):
        Name.__init__(self, parent_block, name)
        # containers for nested entities, in the order the parser fills them
        for container in ("vars", "enums", "typedefs", "classes", "unions"):
            setattr(self, container, [])
        self.parser_file = CurrentFile()
        self.parser_line = CurrentLine()
class Namespace(Block):
    """A C++ namespace; registers itself with its parent namespace.

    :param parent_block: enclosing Namespace (or None for the global namespace).
    :param name: namespace name; empty for an anonymous/global namespace.
    :raises ParserError: when the parent is not a namespace.
    """
    def __init__(self, parent_block, name=""):
        Block.__init__(self, parent_block, name)
        self.namespaces = []
        self.methods = []
        self.omit = False
        # BUGFIX: was 'self.stub = -False' (a typo; evaluated to 0) — use a plain bool.
        self.stub = False
        if self.parent != None:
            if isinstance(self.parent, Namespace):
                self.parent.namespaces.append(self)
            else:
                raise ParserError("can't insert namespace '%s' into non-namespace block '%s'" %
                                  (self.name, self.parent.name))
    def Proto(self):
        return self.name
    def __str__(self):
        return "namespace " + self.Proto()
    def __repr__(self):
        # BUGFIX: previously tested the bound method 'self.Proto' (always truthy),
        # so an unnamed namespace never printed as "global namespace".
        return self.__str__() if self.Proto() else "global namespace"
# Holds a generic type, wraps fundamental and user-defined types with references and pointers
class Type:
    """Wraps a base type together with cv-qualifiers and pointer/reference markers (Ref flags)."""
    def __init__(self, basetype):
        self.type = basetype
        self.ref = Ref.VALUE
    def _has(self, flag):
        # True when the given Ref flag is set on this type
        return (self.ref & flag) != 0
    def IsConst(self):
        return self._has(Ref.CONST)
    def IsVolatile(self):
        return self._has(Ref.VOLATILE)
    def IsPointer(self):
        return self._has(Ref.POINTER)
    def IsConstPointer(self):
        return self._has(Ref.CONST_POINTER)
    def IsVolatilePointer(self):
        return self._has(Ref.VOLATILE_POINTER)
    def IsReference(self):
        return self._has(Ref.REFERENCE)
    def IsRvalueReference(self):
        # NOTE: historically returns the raw masked int, not a bool
        return self.ref & Ref.RVALUE_REFERENCE
    def IsPointerToConst(self):
        return self.IsConst() and self.IsPointer()
    def IsConstPointerToConst(self):
        return self.IsConst() and self.IsConstPointer()
    def IsConstReference(self):
        return self.IsConst() and self.IsReference()
    def IsValue(self):
        return not (self.IsPointer() or self.IsReference() or self.IsRvalueReference())
    def IsNonConstReference(self):
        return self.IsReference() and not self.IsConst()
    def IsNonConstPointer(self):
        return self.IsPointer() and not self.IsConst()
    def IsFundamental(self):
        return isinstance(self.type, Fundamental)
    def IsIntrinsic(self):
        return isinstance(self.type, Intrinsic)
    def IsClass(self):
        return isinstance(self.type, Class)
    def Type(self):
        return self.type
    def TypeName(self):
        return self.type.Proto()
    def CVString(self):
        """Render the cv-qualifiers, e.g. 'const volatile' (empty when unqualified)."""
        parts = []
        if self.IsConst():
            parts.append("const")
        if self.IsVolatile():
            parts.append("volatile")
        return " ".join(parts)
    def Proto(self):
        """Render the full C++ spelling of this type including qualifiers and markers."""
        rendered = "const " if self.IsConst() else ""
        rendered += self.TypeName()
        if self.IsPointer():
            rendered += "*"
        if self.IsReference():
            rendered += "&"
        elif self.IsRvalueReference():
            rendered += "&&"
        if self.IsConstPointer():
            rendered += " const"
        if self.IsVolatilePointer():
            rendered += " volatile"
        return rendered
    def __str__(self):
        return self.Proto()
    def __repr__(self):
        return "type " + str(self)
def TypeStr(s):
    """Render *s* as a type string; non-Type values are marked as undefined."""
    if isinstance(s, Type):
        return str(s)
    return str(Undefined(s, "/* undefined */ "))
def ValueStr(s):
    """Render a constant value; non-int, non-str values are marked as unparsable."""
    if isinstance(s, int):
        return str(s)
    if isinstance(s, str):
        return s
    return str(Undefined(s, "/* unparsable */ "))
# Holds typedef definition
class Typedef(Identifier, Name):
    """A typedef; resolves the aliased type and registers itself with the parent scope."""
    def __init__(self, parent_block, string):
        Identifier.__init__(self, parent_block, self, string, [])
        Name.__init__(self, parent_block, self.name)
        self.parent = parent_block
        self.parent.typedefs.append(self)
        self.is_event = False
        # typedefs nested in iterator classes inherit the iterator property
        if isinstance(self.parent, (Class, Typedef)):
            self.is_iterator = self.parent.is_iterator
        else:
            self.is_iterator = False
    def Proto(self):
        return self.full_name
    def __str__(self):
        return "typedef %s %s" % (self.full_name, TypeStr(self.type))
    def __repr__(self):
        return "typedef %s [= %s]" % (self.full_name, TypeStr(self.type.type))
# Holds structs and classes
class Class(Identifier, Block):
    """A C++ class or struct definition."""
    def __init__(self, parent_block, name):
        Identifier.__init__(self, parent_block, self, [name], [])
        Block.__init__(self, parent_block, name)
        self.type = self.full_name
        self.specifiers = []
        self.methods = []
        self.classes = []
        self.ancestors = []  # base classes (tuples; element [0] is the printable base)
        self._current_access = "public"
        # annotation-driven generator flags
        self.omit = False
        self.stub = False
        self.is_json = False
        self.is_event = False
        self.is_extended = False
        self.is_iterator = False
        self.type_name = name
        self.parent.classes.append(self)
    def Proto(self):
        return self.full_name
    def __str__(self):
        return "class " + self.Proto()
    def __repr__(self):
        bases = ""
        if self.ancestors:
            bases = " [<- %s]" % ", ".join(str(a[0]) for a in self.ancestors)
        return "class %s%s" % (self.full_name, bases)
# Holds unions
class Union(Identifier, Block):
    """A C++ union definition; registers itself with the parent scope."""
    def __init__(self, parent_block, name):
        Identifier.__init__(self, parent_block, self, [name], [])
        Block.__init__(self, parent_block, name)
        self.methods = []
        self.classes = []
        self._current_access = "public"
        # annotation-driven generator flags
        self.omit = False
        self.stub = False
        self.parent.unions.append(self)
    def __str__(self):
        return "union " + self.full_name
    def __repr__(self):
        return str(self)
# Holds enumeration blocks, including class enums
class Enum(Identifier, Block):
    """An enumeration block — plain 'enum' or scoped 'enum class' — with an underlying type."""
    def __init__(self, parent_block, name, is_scoped, type="int"):
        Identifier.__init__(self, parent_block, self, [type, name], [])
        Block.__init__(self, parent_block, name)
        self.items = []
        self.scoped = is_scoped
        self.parent.enums.append(self)
        self._last_value = 0  # next auto-assigned enumerator value
    def Proto(self):
        return self.full_name
    def __str__(self):
        kind = "enum class " if self.scoped else "enum "
        return kind + "%s : %s" % (self.Proto(), TypeStr(self.type))
    def __repr__(self):
        return str(self)
    def SetValue(self, value):
        """Record the last assigned value so auto-increment continues after it."""
        self._last_value = value + 1
    def GetValue(self):
        """Return the value the next auto-assigned enumerator should take."""
        return self._last_value
# Holds functions
class Function(Block, Name):
    """A free function: return-value descriptor, specifiers and a parameter scope (vars)."""
    def __init__(self, parent_block, name, ret_type, valid_specifiers=["static", "extern", "inline"]):
        # NOTE(review): mutable default argument; harmless only while callers never mutate it
        self.specifiers = []
        # NOTE(review): with a falsy 'name' this reads 'self.name' before Block/Name set it;
        # relies on a subclass having assigned it earlier — confirm
        Block.__init__(self, parent_block, name if name else self.name)
        Name.__init__(self, parent_block, self.name)
        self.retval = Identifier(self, self, ret_type, valid_specifiers, False)
        self.omit = False
        self.stub = False
        self.parent.methods.append(self)
    def Proto(self):
        # NOTE(review): IsStatic() is defined on subclasses (e.g. Method), not on Function itself
        _str = "static " if self.IsStatic() else ""
        _str += TypeStr(self.retval.type) if self.retval.type else ""
        _str += (" " if str(self.retval) else "") + self.name
        _str += "(%s)" % (", ".join([str(v) for v in self.vars]))
        return _str
    def __str__(self):
        return self.Proto()
    def __repr__(self):
        return "function %s" % (self.name)
# Holds variables and constants
class Variable(Identifier, Name):
    """A variable or constant; optionally carries an evaluated initializer value."""
    def __init__(self, parent_block, string, value=[], valid_specifiers=["static", "extern", "register"]):
        Identifier.__init__(self, parent_block, self, string, valid_specifiers)
        Name.__init__(self, parent_block, self.name)
        self.value = Evaluate(value) if value else None
        self.parent.vars.append(self)
    def Proto(self):
        return "%s %s" % (TypeStr(self.type), self.name)
    def __str__(self):
        return self.Proto()
    def __repr__(self):
        initializer = ""
        if self.value:
            initializer = " = " + ValueStr(self.value)
        return "variable %s %s '%s'%s" % (str(self.specifiers), TypeStr(self.type), str(self.name), initializer)
class Parameter(Variable):
    """A function/method parameter; picks up its @param description when present."""
    def __init__(self, parent_block, string, value=[], valid_specifiers=[]):
        Variable.__init__(self, parent_block, string, value, valid_specifiers)
        descriptions = parent_block.retval.meta.param
        if self.name in descriptions:
            self.meta.brief = descriptions[self.name]
    def Proto(self):
        return TypeStr(self.type)
    def __str__(self):
        return "%s %s" % (self.Proto(), self.name)
    def __repr__(self):
        initializer = (" = " + ValueStr(self.value)) if self.value else ""
        return "param %s '%s'%s" % (TypeStr(self.type), str(self.name), initializer)
# Holds class member functions (methods)
class Method(Function):
    """A class member function; tracks access level, cv-qualifiers and specifiers."""
    def __init__(self, parent_block, name, ret_type):
        Function.__init__(self, parent_block, name, ret_type,
                          ["inline", "static", "virtual", "explicit", "constexpr", "friend"])
        self.access = self.parent._current_access
        self.qualifiers = []
    def IsVirtual(self):
        return "virtual" in self.specifiers
    def IsPureVirtual(self):
        return "pure-virtual" in self.specifiers
    def IsConst(self):
        return "const" in self.qualifiers
    def IsVolatile(self):
        return "volatile" in self.qualifiers
    def IsStatic(self):
        return "static" in self.specifiers
    def CVString(self):
        # NOTE: shadows the 'str' builtin locally; left as-is
        str = "const" if self.IsConst() else ""
        str += " " if self.IsConst() and self.IsVolatile() else ""
        str += "volatile" if self.IsVolatile() else ""
        return str
    def Proto(self):
        """Render the full C++-style method prototype."""
        _str = "virtual " if self.IsVirtual() else ""
        _str += "static " if self.IsStatic() else ""
        _str += TypeStr(self.retval.type) if self.retval.type else ""
        _str += (" " if str(self.retval) else "") + self.name
        _str += "(%s)" % (", ".join([v.Proto() for v in self.vars]))
        _str += " " + self.CVString() if self.CVString() else ""
        _str += " = 0" if self.IsPureVirtual() else ""
        return _str
    def __str__(self):
        return self.Proto()
    def __repr__(self):
        cv = " " + self.CVString() if self.CVString() else ""
        # NOTE(review): 'self.type' is not set by Method/Function; this likely raises
        # AttributeError and may have meant 'self.retval.type' — confirm
        return "method %s %s '%s' (%s)%s %s" % (self.access, TypeStr(self.type), self.name, ", ".join(
            [str(v) for v in self.vars]), cv, str(self.specifiers))
class Destructor(Method):
    """A class destructor."""
    def __init__(self, parent_block, name, ret_type):
        Method.__init__(self, parent_block, name, ret_type)
    def __repr__(self):
        return "destructor %s '%s' %s" % (self.access, self.name, str(self.specifiers))
# Holds member attributes and constants
class Attribute(Variable):
    """A class member attribute or constant, carrying its access level."""
    def __init__(self, parent_block, string, value=[]):
        Variable.__init__(self, parent_block, string, value, ["static", "constexpr", "thread_local", "mutable"])
        self.access = self.parent._current_access
        # BUGFIX: 'self.qualifiers' is read by IsConst()/IsVolatile()/CVString() but was
        # never initialized anywhere in this hierarchy (only Method sets it), so those
        # calls raised AttributeError.  Initialize it empty.
        self.qualifiers = []
    def IsConst(self):
        return "const" in self.qualifiers
    def IsVolatile(self):
        return "volatile" in self.qualifiers
    def IsStatic(self):
        return "static" in self.specifiers
    def CVString(self):
        """Render the cv-qualifiers, e.g. 'const volatile' (empty when unqualified)."""
        parts = []
        if self.IsConst():
            parts.append("const")
        if self.IsVolatile():
            parts.append("volatile")
        return " ".join(parts)
    def Proto(self):
        return "%s %s" % (TypeStr(self.type), str(self.name))
    def __str__(self):
        value = ValueStr(self.value) if self.value else None
        return "%s %s" % (self.Proto(), (" = " + value) if value else "")
    def __repr__(self):
        value = ValueStr(self.value) if self.value else None
        return "attribute %s %s %s '%s'%s" % (self.access, str(self.specifiers), TypeStr(self.type), str(self.name),
                                              (" = " + value) if value else "")
# Holds enumeration items
class Enumerator(Identifier, Name):
    """A single enumeration item; its value is explicit or auto-incremented."""
    def __init__(self, parent_block, name, value=None, type=["int"]):
        # unscoped enums inject their items into the surrounding scope
        scope = parent_block if parent_block.scoped else parent_block.parent
        Identifier.__init__(self, scope, self, [type, name], [])
        Name.__init__(self, scope, self.name)
        self.parent = parent_block
        self.autoValue = value is None
        self.value = parent_block.GetValue() if value is None else Evaluate(value)
        if isinstance(self.value, int):
            # keep the enum's auto-increment counter in sync
            self.parent.SetValue(self.value)
        self.parent.items.append(self)
    def Proto(self):
        return self.full_name
    def __str__(self):
        return "%s = %s" % (self.Proto(), ValueStr(self.value))
    def __repr__(self):
        return "enumerator %s '%s' = %s" % (TypeStr(self.type), str(self.full_name), ValueStr(self.value))
class TemplateNonTypeParameter(Variable):
    """A non-type template parameter (e.g. 'int N'), with an optional default value."""
    def __init__(self, parent_block, string, index, value=[]):
        Variable.__init__(self, parent_block, string, [])
        self.value = Evaluate(value) if value else None
        self.index = index  # position within the template parameter list
        self.parent.arguments.append(self)
    def __repr__(self):
        return "non-type parameter %s '%s' [= %s]" % (TypeStr(self.type), self.name, str(self.value))
class TemplateTypeParameter(Name):
    """A type template parameter (e.g. 'typename T')."""
    def __init__(self, parent_block, string, index):
        Name.__init__(self, parent_block, string)
        self.index = index  # position within the template parameter list
        parent_block.parameters.append(self)
    def Proto(self):
        return self.name
    def __str__(self):
        return "typename " + self.Proto()
    def __repr__(self):
        return "type parameter " + self.Proto()
class InstantiatedTemplateClass(Class):
    """A concrete instantiation of a template class for a specific argument list."""
    def __init__(self, parent_block, name, params, args):
        # a short stable hash of the argument list keeps instance names unique
        digest = hashlib.sha1("_".join(args).encode('utf-8')).hexdigest()[:8].upper()
        Class.__init__(self, parent_block, name + "Instance" + digest)
        self.baseName = Name(parent_block, name)
        self.params = params
        self.args = args
        self.resolvedArgs = [Identifier(parent_block, self, [arg], []) for arg in args]
        self.type = self.TypeName()
    def TypeName(self):
        """Render 'Base<Arg1, Arg2, ...>' using the resolved argument types."""
        rendered = []
        for arg in self.resolvedArgs:
            rendered.append(str("".join(arg.type) if isinstance(arg.type, list) else arg.type))
        return "%s<%s>" % (self.baseName.full_name, ", ".join(rendered))
    def _Bindings(self):
        # "P = arg" for each parameter/argument pair
        pairs = []
        for i, _ in enumerate(self.params):
            pairs.append(self.params[i].name + " = " + str(self.args[i]))
        return ", ".join(pairs)
    def Proto(self):
        return self.TypeName()
    def __str__(self):
        header = "template class %s<%s>" % (self.baseName.full_name, ", ".join(str(p) for p in self.params))
        return header + " [with %s]" % self._Bindings()
    def __repr__(self):
        return "%s [instance of %s [with %s]]" % (Class.__repr__(self), self.TypeName(), self._Bindings())
class TemplateClass(Class):
    """A class template definition.

    Parses the template parameter list and can produce concrete
    InstantiatedTemplateClass objects via Instantiate().
    """
    def ParseArguments(self, string):
        # Extract the outermost <...> group (tolerating one nesting level of <>)
        # and split it on commas that separate the parameters/arguments.
        groups = re.findall(r'<(?:[^<>]*|<[^<>]*>)*>', string)
        if groups:
            stringParams = [s.strip() for s in re.split(r',\s*(?![^<>]*\))', groups[0][1:-1].strip())]
            return stringParams
        else:
            return []
    def __init__(self, parent_block, name, params):
        Class.__init__(self, parent_block, name)
        self.parameters = []
        self.arguments = []
        self.paramList = []
        paramList = self.ParseArguments(params)
        # enumerate() instead of paramList.index(p): index() is O(n) per item
        # and returns the wrong position when two parameter strings are equal.
        for index, p in enumerate(paramList):
            if "typename" in p or "class" in p:
                # type parameter, e.g. "typename T" / "class T"
                param = TemplateTypeParameter(self, p.split()[1], index=index)
            else:
                # non-type parameter, e.g. "int N"
                param = TemplateNonTypeParameter(self, p.split(), index=index)
            self.paramList.append(param)
    def Instantiate(self, arguments, parent):
        """Build an InstantiatedTemplateClass with 'arguments' substituted for the parameters."""
        def _Substitute(identifier):
            # Replace template parameters inside an identifier's type/value
            # with the concrete argument strings of this instantiation.
            if isinstance(identifier.type, list):
                for i, v in enumerate(identifier.type):
                    if isinstance(v, TemplateTypeParameter):
                        if v.name in paramDict:
                            identifier.type[i] = strArgs[paramDict[v.name].index]
                            identifier.ResolveIdentifiers(instance)
                        break
            if (isinstance(identifier, Enumerator) or isinstance(identifier, Variable)) and identifier.value:
                for i, v in enumerate(identifier.value):
                    if isinstance(v, TemplateNonTypeParameter):
                        identifier.value[i] = strArgs[argDict[v.name].index]
                        identifier.value = Evaluate(identifier.value)
                        break
        strArgs = self.ParseArguments(arguments)
        # NOTE(review): self.parameters/self.arguments are never populated in
        # this class, so these dicts may stay empty — confirm they are filled
        # elsewhere before relying on substitution.
        paramDict = dict(zip([x.name for x in self.parameters], self.parameters))
        argDict = dict(zip([x.name for x in self.arguments], self.arguments))
        instance = InstantiatedTemplateClass(self.parent, self.name, self.paramList, strArgs)
        # Carry over all class-level metadata to the instantiation.
        instance.ancestors = self.ancestors
        instance.specifiers = self.specifiers
        instance.is_json = self.is_json
        instance.is_extended = self.is_extended
        instance.is_event = self.is_event
        instance.is_iterator = self.is_iterator
        for t in self.typedefs:
            newTypedef = copy.copy(t)
            newTypedef.parent = instance
            newTypedef.type = copy.copy(t.type)
            _Substitute(newTypedef)
            instance.typedefs.append(newTypedef)
        for v in self.vars:
            newAttr = copy.copy(v)
            newAttr.parent = instance
            newAttr.type = copy.copy(v.type)
            newAttr.value = copy.copy(v.value)
            _Substitute(newAttr)
            # FIX: attribute copies belong in instance.vars; they were
            # erroneously appended to instance.typedefs (copy-paste from the
            # typedef loop above).
            instance.vars.append(newAttr)
        for e in self.enums:
            newEnum = copy.copy(e)
            newEnum.items = []
            newEnum.parent = instance
            _Substitute(newEnum)
            for i in e.items:
                newItem = copy.copy(i)
                newItem.type = copy.copy(i.type)
                newItem.value = copy.copy(i.value)
                _Substitute(newItem)
                newEnum.items.append(newItem)
            instance.enums.append(newEnum)
        for m in self.methods:
            newMethod = copy.copy(m)
            newMethod.vars = []
            newMethod.parent = instance
            if not isinstance(m, Destructor):
                newMethod.retval = copy.copy(m.retval)
                newMethod.retval.type = copy.copy(m.retval.type)
                _Substitute(newMethod.retval)
            for p in m.vars:
                newVar = copy.copy(p)
                newVar.type = copy.copy(p.type)
                newVar.value = copy.copy(p.value)
                _Substitute(newVar)
                newMethod.vars.append(newVar)
            instance.methods.append(newMethod)
        return instance
    def __str__(self):
        return "template class %s<%s>" % (self.full_name, ", ".join([str(p) for p in self.paramList]))
    def __repr__(self):
        return "template %s<%s>" % (Class.__repr__(self), ", ".join([repr(p) for p in self.paramList]))
# -------------------------------------------------------------------------
# PRIVATE FUNCTIONS
# -------------------------------------------------------------------------
# Source file test into a list of tokens, removing comments and preprocessor directives.
def __Tokenize(contents):
    """Split preprocessed C++ source text into a flat token list.

    Strips comments and preprocessor directives, while translating special
    '@' annotations found inside comments (e.g. @json, @omit, @length) into
    uppercase marker tokens ("@JSON", "@OMIT", [...]) consumed by Parse().
    Injects "@LINE:"/"@FILE:" markers so errors can be mapped back to the
    original source location.
    """
    global current_file
    global current_line
    # First pass: split per line, re-join '\'-continued lines, and insert a
    # synthetic "// @_line:N" comment for every physical line.
    tokens = [s.strip() for s in re.split(r"([\r\n])", contents, flags=re.MULTILINE) if s]
    eoltokens = []
    line = 1
    inComment = 0
    for token in tokens:
        if token.startswith("// @_file:"):
            line = 1  # line counter restarts at every file boundary marker
        if token == '':
            if not inComment:
                eoltokens.append("// @_line:" + str(line) + " ")
            line = line + 1
        elif (len(eoltokens) > 1) and eoltokens[-2].endswith("\\"):
            # backslash line continuation: glue onto the previous line
            del eoltokens[-1]
            eoltokens[-1] = eoltokens[-1][:-1] + token
        else:
            eoltokens.append(token)
        inComment += eoltokens[-1].count("/*") - eoltokens[-1].count("*/")
    contents = "\n".join(eoltokens)
    # Second pass: the actual lexer regex (comments, strings, operators, ...).
    formula = (
        r"(#if 0[\S\s]*?#endif)"
        r"|(#.*)"  # preprocessor
        r"|(/\*[\S\s]*?\*/)"  # multi-line comments
        r"|(//.*)"  # single line comments
        r"|(\"[^\"]+\")"  # double quotes
        r"|(\'[^\']+\')"  # quotes
        r"|(::)|(==)|(!=)|(>=)|(<=)|(&&)|(\|\|)"  # two-char operators
        r"|(\+\+)|(--)|(\+=)|(-=)|(/=)|(\*=)|(%=)|(^=)|(&=)|(\|=)|(~=)"
        r"|([,:;~!?=^/*%-\+&<>\{\}\(\)\[\]])"  # single-char operators
        r"|([\r\n\t ])"  # whitespace
    )
    tokens = [s.strip() for s in re.split(formula, contents, flags=(re.MULTILINE)) if s]
    tagtokens = []
    # check for special metadata within comments
    skipmode = False
    for token in tokens:
        if token:
            if skipmode:
                # In skip mode everything is dropped until the next file marker.
                if "@_file" in token:
                    skipmode = False
                else:
                    continue
            def __ParseLength(string, tag):
                # Parse the (possibly parenthesised) expression following a
                # value-carrying tag such as @length/@maxlength/@interface.
                formula = (r"(\"[^\"]+\")"
                           r"|(\'[^\']+\')"
                           r"|(\*/)|(::)|(==)|(!=)|(>=)|(<=)|(&&)|(\|\|)"
                           r"|(\+\+)|(--)|(\+=)|(-=)|(/=)|(\*=)|(%=)|(^=)|(&=)|(\|=)|(~=)"
                           r"|([,:;~!?=^/*%-\+&<>\{\}\(\)\[\]])"
                           r"|([\r\n\t ])")
                tagtokens.append(tag.upper())
                length_str = string[string.index(tag) + len(tag):]
                length_tokens = [
                    s.strip() for s in re.split(formula, length_str, flags=re.MULTILINE)
                    if isinstance(s, str) and len(s.strip())
                ]
                if length_tokens[0] == ':':
                    length_tokens = length_tokens[1:]
                no_close_last = (length_tokens[0] == '(')
                tokens = []
                par_count = 0
                for t in length_tokens:
                    if t == '(':
                        if tokens:
                            tokens.append(t)
                        par_count += 1
                    elif t == ')':
                        par_count -= 1
                        if par_count == 0:
                            if not no_close_last:
                                tokens.append(t)
                            break
                        else:
                            tokens.append(t)
                    elif t == '*/' or t == "," or t[0] == '@':
                        break
                    else:
                        tokens.append(t)
                        if par_count == 0:
                            break
                if par_count != 0:
                    raise ParserError("unmatched parenthesis in %s expression" % tag)
                if len(tokens) == 0:
                    raise ParserError("invalid %s value" % tag)
                return tokens
            if ((token[:2] == "/*") and (token.count("/*") != token.count("*/"))):
                raise ParserError("multi-line comment not closed")
            if ((token[:2] == "/*") or (token[:2] == "//")):
                def _find(word, string):
                    # whole-word match of an @tag inside a comment token
                    return re.compile(r"[ \r\n/\*]({0})([: \r\n\*]|$)".format(word)).search(string) is not None
                if _find("@stubgen", token):
                    if "@stubgen:skip" in token:
                        skipmode = True
                    elif "@stubgen:omit" in token:
                        tagtokens.append("@OMIT")
                    elif "@stubgen:stub" in token:
                        tagtokens.append("@STUB")
                    elif "@stubgen:include" in token:
                        pass  # nothing to do here
                    else:
                        raise ParserError("invalid @stubgen tag")
                if _find("@stop", token):
                    # FIX: was 'skipMode = True' (capital M), which only
                    # created a dead local name, so @stop never actually
                    # enabled skip mode.
                    skipmode = True
                if _find("@omit", token):
                    tagtokens.append("@OMIT")
                if _find("@stub", token):
                    tagtokens.append("@STUB")
                if _find("@in", token):
                    tagtokens.append("@IN")
                if _find("@out", token):
                    tagtokens.append("@OUT")
                if _find("@inout", token):
                    tagtokens.append("@IN")
                    tagtokens.append("@OUT")
                if _find("@property", token):
                    tagtokens.append("@PROPERTY")
                if _find("@deprecated", token):
                    tagtokens.append("@DEPRECATED")
                if _find("@json", token):
                    tagtokens.append("@JSON")
                if _find("@event", token):
                    tagtokens.append("@EVENT")
                if _find("@extended", token):
                    tagtokens.append("@EXTENDED")
                if _find("@iterator", token):
                    tagtokens.append("@ITERATOR")
                if _find("@text", token):
                    tagtokens.append(__ParseLength(token, "@text"))
                if _find("@length", token):
                    tagtokens.append(__ParseLength(token, "@length"))
                if _find("@maxlength", token):
                    tagtokens.append(__ParseLength(token, "@maxlength"))
                if _find("@interface", token):
                    tagtokens.append(__ParseLength(token, "@interface"))
                def FindDoxyString(tag, hasParam, string, tagtokens):
                    # Extract a doxygen-style description for 'tag'.
                    def EndOfTag(string, start):
                        end_comment = string.find("*/", start)
                        next_tag = string.find("@", start)
                        end = None
                        if next_tag != -1 and end_comment != -1:
                            if next_tag < end_comment:
                                end = next_tag
                        elif end_comment != -1:
                            end = end_comment
                        return end
                    start = string.find(tag)
                    if (start != -1):
                        start += len(tag) + 1
                        # NOTE(review): slices the outer-loop 'token', not the
                        # 'string' parameter, and the recursion below passes a
                        # single character (string[start+1]) — looks like
                        # intended-but-inert multi-occurrence handling; kept
                        # as-is to preserve behavior.
                        desc = string[start:EndOfTag(token, start)].strip(" *\n")
                        if desc:
                            tagtokens.append(tag.upper())
                            if hasParam:
                                tagtokens.append(desc.split(" ",1)[0])
                                tagtokens.append(desc.split(" ",1)[1])
                            else:
                                tagtokens.append(desc)
                        FindDoxyString(tag, hasParam, string[start+1], tagtokens)
                FindDoxyString("@brief", False, token, tagtokens)
                FindDoxyString("@details", False, token, tagtokens)
                FindDoxyString("@param", True, token, tagtokens)
                FindDoxyString("@retval", True, token, tagtokens)
                if _find("@_file", token):
                    idx = token.index("@_file:") + 7
                    tagtokens.append("@FILE:" + token[idx:])
                    current_file = token[idx:]
                if _find("@_line", token):
                    idx = token.index("@_line:") + 7
                    # collapse consecutive line markers into the latest one
                    if len(tagtokens) and not isinstance(tagtokens[-1],list) and tagtokens[-1].startswith("@LINE:"):
                        del tagtokens[-1]
                    current_line = int(token[idx:].split()[0])
                    tagtokens.append("@LINE:" + token[idx:])
            elif len(token) > 0 and token[0] != '@':
                # FIX: this line was truncated in the source ("token[0] != '");
                # restored the '@' filter so ordinary tokens are kept while
                # unhandled @-markers are dropped.
                tagtokens.append(token)
    tagtokens.append(";")  # prevent potential out-of-range errors
    return tagtokens
# -------------------------------------------------------------------------
# EXPORTED FUNCTIONS
# -------------------------------------------------------------------------
# Module-level parser state, shared between Parse() and the Current*() helpers.
i = 0  # index of the token currently being processed by Parse()
tokens = []  # flat token stream produced by __Tokenize()
line_numbers = []  # source line number for each entry in 'tokens'
files = []  # source file name for each entry in 'tokens'
current_line = 0  # line tracked during preprocessing/tokenization
current_file = "undefined"  # file tracked during preprocessing/tokenization
def CurrentFile():
    """Return the source file an error should be attributed to.

    While the C++ token stream is being parsed (i > 0) the per-token file
    table is authoritative; before that, during preprocessing, the module
    tracker 'current_file' is.
    """
    return files[i] if i > 0 else current_file
def CurrentLine():
    """Return the source line an error should be attributed to.

    While the C++ token stream is being parsed (i > 0) the per-token line
    table is authoritative; before that, during preprocessing, the module
    tracker 'current_line' is.
    """
    return line_numbers[i] if i > 0 else current_line
# Builds a syntax tree (data structures only) of C++ source code
def Parse(contents):
    """Build a syntax tree (data structures only) of C++ source code.

    Tokenizes 'contents' and runs a single-pass state machine over the token
    stream, populating the module-level 'global_namespace' tree with
    namespaces, classes, enums, typedefs, methods and variables.

    Args:
        contents: preprocessed source text (as produced by ReadFile).

    Returns:
        The root Namespace object ('global_namespace').
    """
    # Start in global namespace.
    global global_namespace
    global current_file
    global tokens
    global line_numbers
    global files
    global i
    i = 0
    tokens = []
    line_numbers = []
    files = []
    line_tokens = []
    # NOTE(review): 'current_line' is not declared global here, so this
    # shadows the module-level counter with a local — confirm intended.
    current_line = 0
    current_file = "undefined"
    # Split into tokens first
    line_tokens = __Tokenize(contents)
    # Strip the @LINE/@FILE markers into the parallel bookkeeping lists, so
    # every real token carries its originating file and line.
    for token in line_tokens:
        if isinstance(token, str) and token.startswith("@LINE:"):
            current_line = int(token[6:].split()[0])
        elif isinstance(token, str) and token.startswith("@FILE:"):
            current_file = token[6:]
            tokens.append("@GLOBAL")
            line_numbers.append(current_line)
            files.append(current_file)
        else:
            tokens.append(token)
            line_numbers.append(current_line)
            files.append(current_file)
    global_namespace = Namespace(None)
    # Parser state: 'current_block' is the scope stack, the *_next flags
    # record annotations that apply to the next declaration.
    current_block = [global_namespace]
    next_block = None
    last_template_def = []
    min_index = 0
    omit_next = False
    stub_next = False
    json_next = False
    event_next = False
    extended_next = False
    iterator_next = False
    in_typedef = False
    # Main loop.
    while i < len(tokens):
        # Handle special tokens
        if not isinstance(tokens[i], str):
            i += 1
            continue
        if tokens[i] == "@OMIT":
            omit_next = True
            tokens[i] = ";"
            i += 1
        elif tokens[i] == "@STUB":
            stub_next = True
            tokens[i] = ";"
            i += 1
        elif tokens[i] == "@JSON":
            json_next = True
            tokens[i] = ";"
            i += 1
        elif tokens[i] == "@EVENT":
            event_next = True
            tokens[i] = ";"
            i += 1
        elif tokens[i] == "@EXTENDED":
            extended_next = True
            tokens[i] = ";"
            i += 1
        elif tokens[i] == "@ITERATOR":
            iterator_next = True
            tokens[i] = ";"
            i += 1
        elif tokens[i] == "@GLOBAL":
            # file boundary: reset all per-file parser state
            current_block = [global_namespace]
            next_block = None
            last_template_def = []
            min_index = 0
            omit_next = False
            stub_next = False
            json_next = False
            event_next = False
            extended_next = False
            iterator_next = False
            in_typedef = False
            tokens[i] = ";"
            i += 1
        # Swallow template definitions
        elif tokens[i] == "template" and tokens[i + 1] == '<':
            s = i
            i += 1
            nest = 0
            while True:
                if tokens[i] == ">":
                    if nest == 1:
                        break
                    nest -= 1
                elif tokens[i] == "<":
                    nest += 1
                i += 1
            i += 1
            last_template_def = tokens[s:i]
            min_index = i
        # Parse namespace definition...
        elif tokens[i] == "namespace":
            namespace_name = ""
            if is_valid(tokens[i + 1]):  # is there a namespace name?
                namespace_name = tokens[i + 1]
                i += 1
            next_block = Namespace(current_block[-1], namespace_name)
            i += 1
        # Parse type alias...
        elif isinstance(current_block[-1], (Namespace, Class)) and tokens[i] == "typedef":
            j = i + 1
            while tokens[j] != ";":
                j += 1
            typedef = Typedef(current_block[-1], tokens[i + 1:j])
            if event_next:
                typedef.is_event = True
                event_next = False
            if not isinstance(typedef.type, Type) and typedef.type[0] == "enum":
                # 'typedef enum {...} X;' — the enum body follows, remember to
                # hook its type back into this typedef when the block closes
                in_typedef = True
                i += 1
            else:
                i = j + 1
        # Parse "using"...
        elif isinstance(current_block[-1], (Namespace, Class)) and tokens[i] == "using":
            if tokens[i + 1] != "namespace" and tokens[i + 2] == "=":
                i += 2
                j = i + 1
                while tokens[j] != ";":
                    j += 1
                # reuse typedef class but correct name accordingly
                if not current_block[-1].omit:
                    typedef = Typedef(current_block[-1], tokens[i + 1:j])
                    if event_next:
                        typedef.is_event = True
                        event_next = False
                    typedef_id = Name(current_block[-1], tokens[i - 1])
                    typedef.name = typedef_id.name
                    typedef.full_name = typedef_id.full_name
                i = j + 1
            elif tokens[i + 1] != "namespace" and tokens[i + 2] != "=":
                if not current_block[-1].omit:
                    raise ParserError("using-declarations are not supported")
            elif tokens[i + 1] == "namespace":
                if not current_block[-1].omit:
                    raise ParserError("'using namespace' directives are not supported")
        # Parse class definition...
        elif (tokens[i] == "class") or (tokens[i] == "struct") or (tokens[i] == "union"):
            name = tokens[i + 1]
            if tokens[i] == "union":
                new_class = Union(current_block[-1], name)
            else:
                new_class = None
                # Look up previous classes in case there already was a forward declaration of this class
                for idx, cl in enumerate(current_block[-1].classes):
                    if cl.name == name:
                        if len(cl.methods) == 0:
                            new_class = cl
                            # move the class to to bottom
                            current_block[-1].classes.append(new_class)
                            del current_block[-1].classes[idx]
                        else:
                            raise ParserError("duplicate class name: " + cl.name)
                        break
                if new_class == None:
                    if last_template_def:
                        new_class = TemplateClass(current_block[-1], name, " ".join(last_template_def))
                        last_template_def = []
                    else:
                        new_class = Class(current_block[-1], name)
            new_class._current_access = "private" if tokens[i] == "class" else "public"
            if omit_next:
                new_class.omit = True
                omit_next = False
            elif stub_next:
                new_class.stub = True
                stub_next = False
            if json_next:
                new_class.is_json = True
                new_class.is_extended = extended_next
                json_next = False
                extended_next = False
            if event_next:
                new_class.is_event = True
                new_class.is_extended = extended_next
                event_next = False
                extended_next = False
            if iterator_next:
                new_class.is_iterator = True
                event_next = False
            if last_template_def:
                new_class.specifiers.append(" ".join(last_template_def))
                last_template_def = []
            i += 1
            if tokens[i + 1] == "final":
                new_class.specifiers.append(tokens[i + 2])
                i += 1
            # parse class ancestors...
            # TODO: refactor!!
            if tokens[i + 1] == ':':
                i += 1
                parent_class = ""
                parent_access = "private"
                specifiers = []
                while True:
                    if tokens[i + 1] in ['{', ',']:
                        # try to find a reference to an already found type
                        parent_ref = Identifier(current_block[-1], current_block[-1], [parent_class], [])
                        new_class.ancestors.append([parent_ref.type, parent_access, specifiers])
                        parent_access = "private"
                        if tokens[i + 1] == '{':
                            break
                    elif tokens[i + 1] in ["public", "private", "protected"]:
                        parent_access = tokens[i + 1]
                    elif tokens[i + 1] == "virtual":
                        specifiers.append(tokens[i + 1])
                    else:
                        parent_class += tokens[i + 1]
                    i += 1
            i += 1
            if tokens[i] == ';':
                i += 1
            else:
                next_block = new_class
        # Parse enum definition...
        elif isinstance(current_block[-1], (Namespace, Class)) and tokens[i] == "enum":
            enum_name = ""
            enum_type = "int"
            is_scoped = False
            if (tokens[i + 1] == "class") or (tokens[i + 1] == "struct"):
                is_scoped = True
                i += 1
            if is_valid(tokens[i + 1]):  # enum name given?
                enum_name = tokens[i + 1]
                i += 1
            if tokens[i + 1] == ':':
                # explicit underlying type, e.g. "enum X : uint8_t"
                enum_type = tokens[i + 2]
                i += 2
            new_enum = Enum(current_block[-1], enum_name, is_scoped, enum_type)
            next_block = new_enum
            i += 1
        # Parse class access specifier...
        elif isinstance(current_block[-1], Class) and tokens[i] == ':':
            current_block[-1]._current_access = tokens[i - 1]
            ASSERT_ISEXPECTED(current_block[-1]._current_access, ["private", "protected", "public"])
            i += 1
        # Parse function/method definition...
        elif isinstance(current_block[-1], (Namespace, Class)) and tokens[i] == "(":
            # concatenate tokens to handle operators and destructors
            j = i - 1
            k = i - 1
            if isinstance(current_block[-1], Class) and (tokens[i - 2] == "operator"):
                name = "operator" + tokens[i - 1]
                j -= 1
                k -= 1
            else:
                name = tokens[i - 1]
                if tokens[i - 2] == '~':
                    name = "~" + name  #dtor
                    j -= 1
                    k -= 1
            # locate return value
            while j >= min_index and tokens[j] not in ['{', '}', ';', ':']:
                j -= 1
            if not current_block[-1].omit and not omit_next:
                ret_type = tokens[j + 1:k]
            else:
                ret_type = []
            if isinstance(current_block[-1], Class):
                if name[0] == "~":
                    method = Destructor(current_block[-1], name, ret_type)
                else:
                    method = Method(current_block[-1], name, ret_type)
            else:
                method = Function(current_block[-1], name, ret_type)
            if omit_next:
                method.omit = True
                omit_next = False
            elif method.parent.omit:
                method.omit = True
            elif stub_next:
                method.stub = True
                stub_next = False
            elif method.parent.stub:
                method.stub = True
            if last_template_def:
                method.specifiers.append(" ".join(last_template_def))
                last_template_def = []
            # try to detect a function/macro call
            function_call = not ret_type and ((name != current_block[-1].name) and (name !=
                                              ("~" + current_block[-1].name)))
            # parse method parameters...
            j = i
            nest = 0
            nest2 = 0
            while tokens[i] != ')':
                while tokens[j]:
                    if tokens[j] == '(':
                        nest += 1
                    elif tokens[j] == ')':
                        nest -= 1
                        if nest == 0 and nest2 == 0:
                            break
                    if tokens[j] == '<':
                        nest2 += 1
                    elif tokens[j] == '>':
                        nest2 -= 1
                    elif tokens[j] == ',' and nest == 1 and nest2 == 0:
                        break
                    j += 1
                param = tokens[i + 1:j]
                if len(param) and not (len(param) == 1 and param[0] == "void"):  # remove C-style f(void)
                    value = []
                    if '=' in param:
                        # split off a default value, e.g. "int x = 0"
                        assignment = param.index('=')
                        value = param[assignment + 1:]
                        param = param[0:assignment]
                    if not current_block[-1].omit and not method.omit:
                        Parameter(method, param, value)
                i = j
                j += 1
            if nest:
                raise ParserError("unmatched parenthesis '('")
            if nest2:
                raise ParserError("unmatched parenthesis '<'")
            # parse post-declaration qualifiers/specifiers...
            if isinstance(current_block[-1], Class):
                while tokens[i] not in [';', '{', ':']:
                    # const, volatile
                    if tokens[i] in ["const", "volatile"]:
                        method.qualifiers.append(tokens[i])
                    # handle pure virtual methods
                    elif (tokens[i] == "="):
                        if tokens[i + 1] == "0" and "virtual" in method.specifiers:  # mark the virtual function as pure
                            method.specifiers.append("pure-virtual")
                        elif tokens[i + 1] in ["default", "delete"]:
                            method.specifiers.append(tokens[i + 1])
                        i += 1
                    elif tokens[i] in ["override", "final", "noexcept"]:
                        method.specifiers.append(tokens[i])
                    i += 1
            if function_call:  # it was apparently a function call and not declaration, so remove it
                current_block[-1].methods.pop()
            else:
                next_block = method
            if tokens[i] == ';':
                i += 1
            elif tokens[i] == ':':  # skip ctor initializers
                while tokens[i] != '{':
                    i += 1
        # Handle opening a compound block or a composite type
        elif tokens[i] == '{':
            current_block.append(next_block)
            i += 1
        # Handle closing a compound block/composite type
        elif tokens[i] == '}':
            if isinstance(current_block[-1], Class) and (tokens[i + 1] != ';'):
                raise ParserError("definitions following a class declaration is not supported (%s)" %
                                  current_block[-1].full_name)
            if len(current_block) > 1:
                current_block.pop()
            else:
                raise ParserError("unmatched brace '{'")
            i += 1
            next_block = Block(current_block[-1])  # new anonymous scope
        # Parse variables and member attributes
        elif isinstance(current_block[-1],
                        (Namespace, Class)) and tokens[i] == ';' and (is_valid(tokens[i - 1]) or tokens[i - 1] == "]"):
            j = i - 1
            while j >= min_index and tokens[j] not in ['{', '}', ';', ":"]:
                j -= 1
            if not current_block[-1].omit:
                if isinstance(current_block[-1], Class):
                    Attribute(current_block[-1], tokens[j + 1:i])
                else:
                    Variable(current_block[-1], tokens[j + 1:i])
            i += 1
        # Parse constants and member constants
        elif isinstance(current_block[-1], (Namespace, Class)) and (tokens[i] == '=') and (tokens[i - 1] != "operator"):
            j = i - 1
            k = i + 1
            while tokens[j] not in ['{', '}', ';', ":"]:
                j -= 1
            while tokens[k] != ';':
                k += 1
            if not current_block[-1].omit:
                if isinstance(current_block[-1], Class):
                    Attribute(current_block[-1], tokens[j + 1:i], tokens[i + 1:k])
                else:
                    Variable(current_block[-1], tokens[j + 1:i], tokens[i + 1:k])
            i = k
        # Parse an enum block...
        elif isinstance(current_block[-1], Enum):
            enum = current_block[-1]
            j = i
            while True:
                if tokens[i] in ['}', ',']:
                    Enumerator(enum, tokens[j], tokens[j + 2:i] if tokens[j + 1] == '=' else None, enum.type)
                    if tokens[i + 1] == '}':
                        i += 1  # handle ,} situation
                        break
                    elif tokens[i] == '}':
                        break
                    else:
                        j = i + 1
                i += 1
            if in_typedef:
                # complete a pending 'typedef enum { ... } X;'
                current_block[-2].typedefs[-1].type = Type(enum)
                in_typedef = False
        else:
            i += 1
    return global_namespace
# -------------------------------------------------------------------------
def ReadFile(source_file, includePaths, quiet=False, initial=""):
    """Read a source file, recursively expanding @stubgen:include/@insert directives.

    Quoted includes ("file.h") are resolved relative to 'source_file';
    angle-bracket includes (<file.h>) are searched in 'includePaths'.
    Each file's text is preceded by a "// @_file:" marker so the tokenizer
    can track locations. Raises LoaderError on unresolved includes; a
    missing top-level file is ignored when 'quiet' is set.

    NOTE(review): 'contents' starts from 'initial' and is also passed down as
    the new 'initial' on recursive calls, so included text may appear more
    than once in the return value — verify against callers.
    """
    contents = initial
    global current_file
    try:
        with open(source_file) as file:
            file_content = file.read()
            pos = 0
            while True:
                idx = file_content.find("@stubgen:include", pos)
                if idx == -1:
                    idx = file_content.find("@insert", pos)
                if idx != -1:
                    pos = idx + 1
                    match = re.search(r' \"(.+?)\"', file_content[idx:])
                    if match:
                        if match.group(1) != os.path.basename(os.path.realpath(source_file)):
                            tryPath = os.path.join(os.path.dirname(os.path.realpath(source_file)), match.group(1))
                            if os.path.isfile(tryPath):
                                prev = current_file
                                current_file = source_file
                                contents += ReadFile(tryPath, includePaths, False, contents)
                                current_file = prev
                            else:
                                raise LoaderError(source_file, "can't include '%s', file does not exist" % tryPath)
                        else:
                            raise LoaderError(source_file, "can't recursively include self")
                    else:
                        match = re.search(r' <(.+?)>', file_content[idx:])
                        if match:
                            found = False
                            for ipath in includePaths:
                                tryPath = os.path.join(ipath, match.group(1))
                                if os.path.isfile(tryPath):
                                    prev = current_file
                                    current_file = source_file
                                    contents += ReadFile(tryPath, includePaths, True, contents)
                                    current_file = prev
                                    found = True
                            if not found:
                                raise LoaderError(source_file, "can't find '%s' in any of the include paths" % match.group(1))
                        else:
                            raise LoaderError(source_file, "syntax error at '%s'" % source_file)
                else:
                    break
            contents += "// @_file:%s\n" % source_file
            contents += file_content
            return contents
    except FileNotFoundError:
        if not quiet:
            raise LoaderError(source_file, "failed to open file")
        return ""
def ParseFile(source_file, includePaths = []):
    """Read a single source file (expanding includes) and parse it into a tree."""
    return Parse(ReadFile(source_file, includePaths))
def ParseFiles(source_files, includePaths = []):
    """Concatenate and parse several source files into one tree.

    A file name prefixed with '@' is read in quiet mode (a missing file is
    silently skipped); empty entries are ignored.
    """
    contents = ""
    for entry in source_files:
        if not entry:
            continue
        quiet = entry.startswith("@")
        path = entry[1:] if quiet else entry
        contents += ReadFile(path, includePaths, quiet, "")
    return Parse(contents)
def DumpTree(tree, ind=0):
    """Pretty-print the parsed tree: scopes, typedefs, enums, vars, methods."""
    pad = " " * ind
    child_pad = pad + "  "
    is_scope = isinstance(tree, (Namespace, Class))
    if is_scope:
        print(pad + str(tree))
    for typedef in tree.typedefs:
        print(child_pad + str(typedef))
    for enum in tree.enums:
        print(child_pad + str(enum))
        for enumerator in enum.items:
            print(pad + "    " + str(enumerator))
    for var in tree.vars:
        print(child_pad + str(var))
    if is_scope:
        for method in tree.methods:
            print(child_pad + str(method))
        for inner in tree.classes:
            DumpTree(inner, ind + 2)
    if isinstance(tree, Namespace):
        for ns in tree.namespaces:
            DumpTree(ns, ind + 2)
if __name__ == "__main__":
    # CLI entry point: parse the file given as argv[1], treating any further
    # arguments as include search paths, then dump the resulting tree.
    tree = ParseFile(sys.argv[1], sys.argv[2:])
    if isinstance(tree, Namespace):
        DumpTree(tree)
    else:
        print(tree)
| true | true |
f73b28f6dd711dd6fec6459c15e78ad8339e0ec2 | 484 | py | Python | setup.py | lexibank/castroyi | 460c025941e54ba31103d1baadb9f3320db441a0 | [
"CC-BY-4.0"
] | null | null | null | setup.py | lexibank/castroyi | 460c025941e54ba31103d1baadb9f3320db441a0 | [
"CC-BY-4.0"
] | 4 | 2019-11-26T12:39:18.000Z | 2021-07-21T08:22:57.000Z | setup.py | lexibank/castroyi | 460c025941e54ba31103d1baadb9f3320db441a0 | [
"CC-BY-4.0"
] | null | null | null | from setuptools import setup
import json
with open("metadata.json", encoding="utf-8") as fp:
metadata = json.load(fp)
setup(
name="lexibank_castroyi",
description=metadata["title"],
license=metadata.get("license", ""),
url=metadata.get("url", ""),
py_modules=["lexibank_castroyi"],
include_package_data=True,
zip_safe=False,
entry_points={"lexibank.dataset": ["castroyi=lexibank_castroyi:Dataset"]},
install_requires=["pylexibank>=3.0"],
)
| 24.2 | 78 | 0.68595 | from setuptools import setup
import json
# Load the lexibank dataset metadata (title, license, url, ...) shipped
# alongside this setup script.
with open("metadata.json", encoding="utf-8") as fp:
    metadata = json.load(fp)
# Standard lexibank dataset packaging: the entry point registers the
# Dataset class with pylexibank's plugin mechanism.
setup(
    name="lexibank_castroyi",
    description=metadata["title"],
    license=metadata.get("license", ""),
    url=metadata.get("url", ""),
    py_modules=["lexibank_castroyi"],
    include_package_data=True,
    zip_safe=False,
    entry_points={"lexibank.dataset": ["castroyi=lexibank_castroyi:Dataset"]},
    install_requires=["pylexibank>=3.0"],
)
| true | true |
f73b29639e420ead86b5724271504549b95b0b75 | 2,245 | py | Python | demo_wheel.py | mawillcockson/barcode-wheel | c0f8f6f281f013fd05c450d152d2d66b564b3a4a | [
"MIT"
] | null | null | null | demo_wheel.py | mawillcockson/barcode-wheel | c0f8f6f281f013fd05c450d152d2d66b564b3a4a | [
"MIT"
] | null | null | null | demo_wheel.py | mawillcockson/barcode-wheel | c0f8f6f281f013fd05c450d152d2d66b564b3a4a | [
"MIT"
] | 1 | 2019-04-29T21:13:35.000Z | 2019-04-29T21:13:35.000Z | """Demonstrates how an application might use the barcode_wheel library"""
import sys
import barcode_wheel
import svgwrite
import pathlib
import logging
import tempfile
import csv
from time import sleep
# Built-in fallback dataset: one "PLU/UPC,Name,picture path" record per line,
# used when no demo_wheel.csv is found next to this script.
demo_contents = (
    """
22001,Money Order (Principal),
22101,Money Order (Fee),
10502,Club Card Savings,
12345678901,Test Product (Name Here),./4094485-random-picture.gif
9,Mardi Gras,
"""
)
# This script's own path; used to derive the demo .csv/.svg file names.
PROG_NAME = pathlib.Path(sys.argv[0])
def main():
    """Run the demo: load (or create) a CSV dataset and render a wheel SVG."""
    demo_file = pathlib.Path(f"./{PROG_NAME.with_suffix('.csv')}")
    demo_svg = pathlib.Path(f"./{PROG_NAME.with_suffix('.svg')}")
    logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
    log = logging.getLogger(f"{PROG_NAME.stem}")
    log.info(
        """
    This demo file includes a demo dataset.
    To use your own dataset, make a file called 'demo_wheel.csv' and put it in the same folder
    as this file.
    The format for each line is:
    PLU/UPC,Name,filepath to picture
    """
    )
    if demo_file.exists():
        log.info(f"Using contents of '{demo_file}'")
    else:
        # NOTE(review): the NamedTemporaryFile handle is only kept in a local;
        # the file is deleted when the object is garbage-collected — confirm it
        # survives long enough for the read below.
        temp_file = tempfile.NamedTemporaryFile(mode="w+t", encoding="utf-8", dir=str(pathlib.Path.cwd()))
        log.info(f"No file found\nUsing default demo contents in temporary file:\n{temp_file.name}")
        demo_file = pathlib.Path(temp_file.name)
        with demo_file.open(mode="w", newline="") as demo:
            csv_writer = csv.writer(demo)
            for row in demo_contents.split("\n"):
                if not row:
                    continue
                csv_writer.writerow(row.split(","))
    # NOTE(review): 'num_slices' is never used and the reader loop below
    # discards every row — presumably placeholders for wiring the CSV data
    # into the wheel; the wheel is currently built with a hard-coded 9 slices.
    num_slices = 0
    with demo_file.open() as demo:
        reader = csv.DictReader(
            f=demo,
            fieldnames=["PLU", "NAME", "PICTURE"],
        )
        for line in reader:
            continue
    drawing = svgwrite.Drawing(
        filename=str(demo_svg),
        size=("100%", "100%"),
        #profile="tiny",
        preserveAspectRatio="xMidyMid meet",
        viewBox="0 0 200 200",
    )
    wheel, placeholders, defs = barcode_wheel.wheel_template(
        center=(100, 100), radius=100, num_slices=9
    )
    drawing.add(wheel)
    for def_item in defs:
        drawing.defs.add(def_item)
    drawing.save()
if __name__ == "__main__":
    # Script entry point.
    main()
| 25.224719 | 106 | 0.635635 | import sys
import barcode_wheel
import svgwrite
import pathlib
import logging
import tempfile
import csv
from time import sleep
# Built-in fallback dataset: one "PLU/UPC,Name,picture path" record per line,
# used when no demo_wheel.csv is found next to this script.
demo_contents = (
    """
22001,Money Order (Principal),
22101,Money Order (Fee),
10502,Club Card Savings,
12345678901,Test Product (Name Here),./4094485-random-picture.gif
9,Mardi Gras,
"""
)
# This script's own path; used to derive the demo .csv/.svg file names.
PROG_NAME = pathlib.Path(sys.argv[0])
def main():
    """Run the demo: load (or create) a CSV dataset and render a wheel SVG."""
    demo_file = pathlib.Path(f"./{PROG_NAME.with_suffix('.csv')}")
    demo_svg = pathlib.Path(f"./{PROG_NAME.with_suffix('.svg')}")
    logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
    log = logging.getLogger(f"{PROG_NAME.stem}")
    log.info(
        """
    This demo file includes a demo dataset.
    To use your own dataset, make a file called 'demo_wheel.csv' and put it in the same folder
    as this file.
    The format for each line is:
    PLU/UPC,Name,filepath to picture
    """
    )
    if demo_file.exists():
        log.info(f"Using contents of '{demo_file}'")
    else:
        # NOTE(review): the NamedTemporaryFile handle is only kept in a local;
        # the file is deleted when the object is garbage-collected — confirm it
        # survives long enough for the read below.
        temp_file = tempfile.NamedTemporaryFile(mode="w+t", encoding="utf-8", dir=str(pathlib.Path.cwd()))
        log.info(f"No file found\nUsing default demo contents in temporary file:\n{temp_file.name}")
        demo_file = pathlib.Path(temp_file.name)
        with demo_file.open(mode="w", newline="") as demo:
            csv_writer = csv.writer(demo)
            for row in demo_contents.split("\n"):
                if not row:
                    continue
                csv_writer.writerow(row.split(","))
    # NOTE(review): 'num_slices' is never used and the reader loop below
    # discards every row — presumably placeholders for wiring the CSV data
    # into the wheel; the wheel is currently built with a hard-coded 9 slices.
    num_slices = 0
    with demo_file.open() as demo:
        reader = csv.DictReader(
            f=demo,
            fieldnames=["PLU", "NAME", "PICTURE"],
        )
        for line in reader:
            continue
    drawing = svgwrite.Drawing(
        filename=str(demo_svg),
        size=("100%", "100%"),
        preserveAspectRatio="xMidyMid meet",
        viewBox="0 0 200 200",
    )
    wheel, placeholders, defs = barcode_wheel.wheel_template(
        center=(100, 100), radius=100, num_slices=9
    )
    drawing.add(wheel)
    for def_item in defs:
        drawing.defs.add(def_item)
    drawing.save()
if __name__ == "__main__":
    # Script entry point.
    main()
| true | true |
f73b29cdc2364a1c4311d41511f41374a4bc8393 | 3,420 | py | Python | lightautoml/pipelines/ml/whitebox_ml_pipe.py | alexeytomashyov/LightAutoML | 41e8e10d430344dbdb5e39dd48342653ee31988c | [
"Apache-2.0"
] | null | null | null | lightautoml/pipelines/ml/whitebox_ml_pipe.py | alexeytomashyov/LightAutoML | 41e8e10d430344dbdb5e39dd48342653ee31988c | [
"Apache-2.0"
] | null | null | null | lightautoml/pipelines/ml/whitebox_ml_pipe.py | alexeytomashyov/LightAutoML | 41e8e10d430344dbdb5e39dd48342653ee31988c | [
"Apache-2.0"
] | null | null | null | """Whitebox MLPipeline."""
import warnings
from typing import Union, Tuple, cast
from .base import MLPipeline
from ..features.wb_pipeline import WBFeatures
from ..selection.base import EmptySelector
from ...dataset.np_pd_dataset import NumpyDataset, PandasDataset
from ...ml_algo.tuning.base import ParamsTuner
from ...ml_algo.whitebox import WbMLAlgo
from ...validation.base import TrainValidIterator, DummyIterator
TunedWB = Union[WbMLAlgo, Tuple[WbMLAlgo, ParamsTuner]]
class WBPipeline(MLPipeline):
    """Special pipeline to handle whitebox model."""
    @property
    def whitebox(self) -> WbMLAlgo:
        # The fitted whitebox model. Cross validation may produce several;
        # only the first fold's model is exposed.
        if len(self.ml_algos[0].models) > 1:
            warnings.warn('More than 1 whitebox model is fitted during cross validation. Only first is returned')
        return self.ml_algos[0].models[0]
    def __init__(self, whitebox: TunedWB):
        """Create whitebox MLPipeline.

        Args:
            whitebox: WbMLAlgo or tuple WbMLAlgo with params tuner.

        """
        super().__init__([whitebox], True, features_pipeline=WBFeatures())
        self._used_features = None
    def fit_predict(self, train_valid: TrainValidIterator) -> NumpyDataset:
        """Fit whitebox.

        Args:
            train_valid: TrainValidIterator.

        Returns:
            Dataset.

        """
        # Keep a tiny sample of the raw train data: after fitting we rebuild
        # the feature pipeline on only the columns the model actually used.
        _subsamp_to_refit = train_valid.train[:5]
        val_pred = super().fit_predict(train_valid)
        self._prune_pipelines(_subsamp_to_refit)
        return cast(NumpyDataset, val_pred)
    def predict(self, dataset: PandasDataset, report: bool = False) -> NumpyDataset:
        """Predict whitebox.

        Additional report param stands for whitebox report generation.

        Args:
            dataset: PandasDataset of input features.
            report: generate report.

        Returns:
            Dataset.

        """
        dataset = self.features_pipeline.transform(dataset)
        args = []
        # Pass the report flag only when the model was configured with
        # report generation enabled.
        if self.ml_algos[0].params['report']:
            args = [report]
        pred = self.ml_algos[0].predict(dataset, *args)
        return pred
    def _prune_pipelines(self, subsamp: PandasDataset):
        # Rebuild the feature pipeline on only the raw columns actually used
        # by the fitted whitebox model(s), to slim down inference.
        # upd used features attribute from list of whiteboxes
        feats_from_wb = set.union(*[set(list(x.features_fit.index)) for x in self.ml_algos[0].models])
        # cols wo prefix - numerics and categories
        raw_columns = list(set(subsamp.features).intersection(feats_from_wb))
        diff_cols = list(set(feats_from_wb).difference(subsamp.features))
        # reconstruct source date columns from derived feature names:
        # 'season_<col>__X' and 'basediff_<base>__<compare>'
        seasons = ['__'.join(x.split('__')[1:]) for x in diff_cols if x.startswith('season_')]
        base_diff = [x.split('__') for x in diff_cols if x.startswith('basediff_')]
        base_diff = [('_'.join(x[0].split('_')[1:]), '__'.join(x[1:])) for x in base_diff]
        base_dates, compare_dates = [x[0] for x in base_diff], [x[1] for x in base_diff]
        dates = list(set(base_dates + compare_dates + seasons))
        raw_columns.extend(dates)
        subsamp = subsamp[:, raw_columns]
        self.features_pipeline = WBFeatures()
        self.pre_selection = EmptySelector()
        self.post_selection = EmptySelector()
        # Re-fit the (empty) selectors and feature pipeline on the pruned sample.
        train_valid = DummyIterator(subsamp)
        train_valid = train_valid.apply_selector(self.pre_selection)
        train_valid = train_valid.apply_feature_pipeline(self.features_pipeline)
        train_valid.apply_selector(self.post_selection)
        return
| 33.861386 | 113 | 0.664035 |
import warnings
from typing import Union, Tuple, cast
from .base import MLPipeline
from ..features.wb_pipeline import WBFeatures
from ..selection.base import EmptySelector
from ...dataset.np_pd_dataset import NumpyDataset, PandasDataset
from ...ml_algo.tuning.base import ParamsTuner
from ...ml_algo.whitebox import WbMLAlgo
from ...validation.base import TrainValidIterator, DummyIterator
TunedWB = Union[WbMLAlgo, Tuple[WbMLAlgo, ParamsTuner]]
class WBPipeline(MLPipeline):
    """Special pipeline to handle a whitebox model."""
    @property
    def whitebox(self) -> WbMLAlgo:
        # The fitted whitebox model. Cross validation may produce several;
        # only the first fold's model is exposed.
        if len(self.ml_algos[0].models) > 1:
            warnings.warn('More than 1 whitebox model is fitted during cross validation. Only first is returned')
        return self.ml_algos[0].models[0]
    def __init__(self, whitebox: TunedWB):
        """Create the pipeline from a WbMLAlgo (optionally paired with a params tuner)."""
        super().__init__([whitebox], True, features_pipeline=WBFeatures())
        self._used_features = None
    def fit_predict(self, train_valid: TrainValidIterator) -> NumpyDataset:
        """Fit the whitebox on ``train_valid`` and return out-of-fold predictions."""
        # Keep a tiny sample of the raw train data: after fitting we rebuild
        # the feature pipeline on only the columns the model actually used.
        _subsamp_to_refit = train_valid.train[:5]
        val_pred = super().fit_predict(train_valid)
        self._prune_pipelines(_subsamp_to_refit)
        return cast(NumpyDataset, val_pred)
    def predict(self, dataset: PandasDataset, report: bool = False) -> NumpyDataset:
        """Predict on ``dataset``; ``report`` requests whitebox report generation."""
        dataset = self.features_pipeline.transform(dataset)
        args = []
        # Pass the report flag only when the model was configured with
        # report generation enabled.
        if self.ml_algos[0].params['report']:
            args = [report]
        pred = self.ml_algos[0].predict(dataset, *args)
        return pred
    def _prune_pipelines(self, subsamp: PandasDataset):
        # Rebuild the feature pipeline on only the raw columns actually used
        # by the fitted whitebox model(s), to slim down inference.
        feats_from_wb = set.union(*[set(list(x.features_fit.index)) for x in self.ml_algos[0].models])
        # columns without a prefix: plain numerics and categories
        raw_columns = list(set(subsamp.features).intersection(feats_from_wb))
        diff_cols = list(set(feats_from_wb).difference(subsamp.features))
        # reconstruct source date columns from derived feature names:
        # 'season_<col>__X' and 'basediff_<base>__<compare>'
        seasons = ['__'.join(x.split('__')[1:]) for x in diff_cols if x.startswith('season_')]
        base_diff = [x.split('__') for x in diff_cols if x.startswith('basediff_')]
        base_diff = [('_'.join(x[0].split('_')[1:]), '__'.join(x[1:])) for x in base_diff]
        base_dates, compare_dates = [x[0] for x in base_diff], [x[1] for x in base_diff]
        dates = list(set(base_dates + compare_dates + seasons))
        raw_columns.extend(dates)
        subsamp = subsamp[:, raw_columns]
        self.features_pipeline = WBFeatures()
        self.pre_selection = EmptySelector()
        self.post_selection = EmptySelector()
        # Re-fit the (empty) selectors and feature pipeline on the pruned sample.
        train_valid = DummyIterator(subsamp)
        train_valid = train_valid.apply_selector(self.pre_selection)
        train_valid = train_valid.apply_feature_pipeline(self.features_pipeline)
        train_valid.apply_selector(self.post_selection)
        return
| true | true |
f73b2a60ec4908a6430d99181f8a20820fb1d123 | 4,692 | py | Python | pajbot/modules/clr_overlay/emotecombo.py | JoachimFlottorp/pajbot | 4fb88c403dedb20d95be80e38da72be1ed064901 | [
"MIT"
] | 128 | 2015-12-28T01:02:30.000Z | 2019-05-24T21:20:50.000Z | pajbot/modules/clr_overlay/emotecombo.py | JoachimFlottorp/pajbot | 4fb88c403dedb20d95be80e38da72be1ed064901 | [
"MIT"
] | 277 | 2015-05-03T18:48:57.000Z | 2019-05-23T17:41:28.000Z | pajbot/modules/clr_overlay/emotecombo.py | JoachimFlottorp/pajbot | 4fb88c403dedb20d95be80e38da72be1ed064901 | [
"MIT"
] | 96 | 2015-08-07T18:49:50.000Z | 2019-05-20T19:49:27.000Z | from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Optional, Set
import logging
from pajbot.managers.handler import HandlerManager
from pajbot.models.emote import Emote, EmoteInstance, EmoteInstanceCountMap
from pajbot.modules import BaseModule
from pajbot.modules.base import ModuleSetting
from pajbot.modules.clr_overlay import CLROverlayModule
if TYPE_CHECKING:
from pajbot.bot import Bot
log = logging.getLogger(__name__)
class EmoteComboModule(BaseModule):
    """Tracks streaks of single-emote chat messages and emits an ``emote_combo``
    websocket event (for the CLR overlay) once the streak reaches the configured
    minimum. The streak resets when a message contains a different emote, more
    than one unique emote, or a disallowed emote."""

    # Module identity / UI metadata consumed by the module framework.
    ID = __name__.rsplit(".", maxsplit=1)[-1]
    NAME = "Emote Combos"
    DESCRIPTION = "Shows emote combos on the CLR pajbot overlay"
    CATEGORY = "Feature"
    PARENT_MODULE = CLROverlayModule
    # User-configurable settings: combo threshold plus per-module allow/block lists.
    SETTINGS = [
        ModuleSetting(
            key="min_emote_combo",
            label="Minimum number of emotes required to trigger the combo",
            type="number",
            required=True,
            placeholder="",
            default=5,
            constraints={"min_value": 2, "max_value": 100},
        ),
        ModuleSetting(
            key="emote_allowlist",
            label=CLROverlayModule.ALLOWLIST_LABEL,
            type="text",
            required=True,
            placeholder=CLROverlayModule.EMOTELIST_PLACEHOLDER_TEXT,
            default="",
        ),
        ModuleSetting(
            key="emote_blocklist",
            label=CLROverlayModule.BLOCKLIST_LABEL,
            type="text",
            required=True,
            placeholder=CLROverlayModule.EMOTELIST_PLACEHOLDER_TEXT,
            default="",
        ),
    ]
    def __init__(self, bot: Optional[Bot]) -> None:
        """Initialize streak state and resolve the parent CLR overlay module (if a bot is attached)."""
        super().__init__(bot)
        self.allowlisted_emotes: Set[str] = set()
        self.blocklisted_emotes: Set[str] = set()
        self.parent_module: Optional[CLROverlayModule] = (
            CLROverlayModule.convert(self.bot.module_manager["clroverlay-group"]) if self.bot else None
        )
        self.emote_count: int = 0
        self.current_emote: Optional[Emote] = None
    def on_loaded(self) -> None:
        """Parse the space-separated allow/block list settings into sets of emote codes."""
        self.allowlisted_emotes = set(
            self.settings["emote_allowlist"].strip().split(" ") if self.settings["emote_allowlist"] else []
        )
        self.blocklisted_emotes = set(
            self.settings["emote_blocklist"].strip().split(" ") if self.settings["emote_blocklist"] else []
        )
    def is_emote_allowed(self, emote_code: str) -> bool:
        """Return whether ``emote_code`` may count toward a combo.

        A non-empty allowlist wins over everything; otherwise a non-empty
        blocklist is consulted; otherwise the decision is delegated to the
        parent CLR overlay module (or allowed if no parent is set).
        """
        if len(self.allowlisted_emotes) > 0:
            return emote_code in self.allowlisted_emotes
        if len(self.blocklisted_emotes) > 0:
            return emote_code not in self.blocklisted_emotes
        if not self.parent_module:
            return True
        return self.parent_module.is_emote_allowed(emote_code)
    def inc_emote_count(self) -> None:
        """Bump the streak counter and emit the overlay event once the threshold is met."""
        if self.bot is None:
            log.warning("EmoteCombo inc_emote_count called when bot is none")
            return
        assert self.current_emote is not None
        self.emote_count += 1
        # Emitted on every message at-or-above the threshold, so the overlay
        # receives an updated count as the combo keeps growing.
        if self.emote_count >= self.settings["min_emote_combo"]:
            self.bot.websocket_manager.emit(
                "emote_combo", {"emote": self.current_emote.jsonify(), "count": self.emote_count}
            )
    def reset(self) -> None:
        """Clear the current streak."""
        self.emote_count = 0
        self.current_emote = None
    def on_message(
        self, emote_instances: List[EmoteInstance], emote_counts: EmoteInstanceCountMap, whisper: bool, **rest: Any
    ) -> bool:
        """Handle a chat message; always returns True so other handlers keep running."""
        if whisper:
            return True
        # Check if the message contains exactly one unique emote
        num_unique_emotes = len(emote_counts)
        if num_unique_emotes != 1:
            self.reset()
            return True
        new_emote = emote_instances[0].emote
        new_emote_code = new_emote.code
        if self.is_emote_allowed(new_emote_code) is False:
            self.reset()
            return True
        # if there is currently a combo...
        if self.current_emote is not None:
            # and this emote is not equal to the combo emote...
            if self.current_emote.code != new_emote_code:
                # The emote of this message is not the one we were previously counting, reset.
                # We do not stop.
                # We start counting this emote instead.
                self.reset()
        if self.current_emote is None:
            self.current_emote = new_emote
        self.inc_emote_count()
        return True
    def enable(self, bot: Optional[Bot]) -> None:
        """Subscribe to chat messages when the module is enabled."""
        HandlerManager.add_handler("on_message", self.on_message)
    def disable(self, bot: Optional[Bot]) -> None:
        """Unsubscribe from chat messages when the module is disabled."""
        HandlerManager.remove_handler("on_message", self.on_message)
from typing import TYPE_CHECKING, Any, List, Optional, Set
import logging
from pajbot.managers.handler import HandlerManager
from pajbot.models.emote import Emote, EmoteInstance, EmoteInstanceCountMap
from pajbot.modules import BaseModule
from pajbot.modules.base import ModuleSetting
from pajbot.modules.clr_overlay import CLROverlayModule
if TYPE_CHECKING:
from pajbot.bot import Bot
log = logging.getLogger(__name__)
class EmoteComboModule(BaseModule):
ID = __name__.rsplit(".", maxsplit=1)[-1]
NAME = "Emote Combos"
DESCRIPTION = "Shows emote combos on the CLR pajbot overlay"
CATEGORY = "Feature"
PARENT_MODULE = CLROverlayModule
SETTINGS = [
ModuleSetting(
key="min_emote_combo",
label="Minimum number of emotes required to trigger the combo",
type="number",
required=True,
placeholder="",
default=5,
constraints={"min_value": 2, "max_value": 100},
),
ModuleSetting(
key="emote_allowlist",
label=CLROverlayModule.ALLOWLIST_LABEL,
type="text",
required=True,
placeholder=CLROverlayModule.EMOTELIST_PLACEHOLDER_TEXT,
default="",
),
ModuleSetting(
key="emote_blocklist",
label=CLROverlayModule.BLOCKLIST_LABEL,
type="text",
required=True,
placeholder=CLROverlayModule.EMOTELIST_PLACEHOLDER_TEXT,
default="",
),
]
def __init__(self, bot: Optional[Bot]) -> None:
super().__init__(bot)
self.allowlisted_emotes: Set[str] = set()
self.blocklisted_emotes: Set[str] = set()
self.parent_module: Optional[CLROverlayModule] = (
CLROverlayModule.convert(self.bot.module_manager["clroverlay-group"]) if self.bot else None
)
self.emote_count: int = 0
self.current_emote: Optional[Emote] = None
def on_loaded(self) -> None:
self.allowlisted_emotes = set(
self.settings["emote_allowlist"].strip().split(" ") if self.settings["emote_allowlist"] else []
)
self.blocklisted_emotes = set(
self.settings["emote_blocklist"].strip().split(" ") if self.settings["emote_blocklist"] else []
)
def is_emote_allowed(self, emote_code: str) -> bool:
if len(self.allowlisted_emotes) > 0:
return emote_code in self.allowlisted_emotes
if len(self.blocklisted_emotes) > 0:
return emote_code not in self.blocklisted_emotes
if not self.parent_module:
return True
return self.parent_module.is_emote_allowed(emote_code)
def inc_emote_count(self) -> None:
if self.bot is None:
log.warning("EmoteCombo inc_emote_count called when bot is none")
return
assert self.current_emote is not None
self.emote_count += 1
if self.emote_count >= self.settings["min_emote_combo"]:
self.bot.websocket_manager.emit(
"emote_combo", {"emote": self.current_emote.jsonify(), "count": self.emote_count}
)
def reset(self) -> None:
self.emote_count = 0
self.current_emote = None
def on_message(
self, emote_instances: List[EmoteInstance], emote_counts: EmoteInstanceCountMap, whisper: bool, **rest: Any
) -> bool:
if whisper:
return True
num_unique_emotes = len(emote_counts)
if num_unique_emotes != 1:
self.reset()
return True
new_emote = emote_instances[0].emote
new_emote_code = new_emote.code
if self.is_emote_allowed(new_emote_code) is False:
self.reset()
return True
if self.current_emote is not None:
if self.current_emote.code != new_emote_code:
self.reset()
if self.current_emote is None:
self.current_emote = new_emote
self.inc_emote_count()
return True
def enable(self, bot: Optional[Bot]) -> None:
HandlerManager.add_handler("on_message", self.on_message)
def disable(self, bot: Optional[Bot]) -> None:
HandlerManager.remove_handler("on_message", self.on_message)
| true | true |
f73b2c32fe94d65086455725c53011f0661e61b3 | 2,852 | py | Python | Movie_Exp.py | Coop37/Movies_Example | 83a9c097e5e26e9d094c6bac49467d1695da53c6 | [
"MIT"
] | null | null | null | Movie_Exp.py | Coop37/Movies_Example | 83a9c097e5e26e9d094c6bac49467d1695da53c6 | [
"MIT"
] | null | null | null | Movie_Exp.py | Coop37/Movies_Example | 83a9c097e5e26e9d094c6bac49467d1695da53c6 | [
"MIT"
] | 1 | 2020-10-27T08:34:40.000Z | 2020-10-27T08:34:40.000Z | import os
from py2neo import Graph
import ast
from json import dumps
from flask import Flask, render_template, g, Response, request
from neo4j import GraphDatabase, basic_auth
app = Flask(__name__)
app.debug = True
# The password must be changed to your NEO4J password.
driver = GraphDatabase.driver('bolt://localhost',auth=basic_auth("neo4j", '6669'))
hold = '?'
def get_db():
if not hasattr(g, 'neo4j_db'):
g.neo4j_db = driver.session()
return g.neo4j_db
@app.route("/")
def get_index():
global hold
# The cypher text between the quotes can be run on the NEO4J Browser with the Movies
# GraphDatabase.
hold = "MATCH (a:Person)-[r:ACTED_IN|:DIRECTED|:PRODUCED|:REVIEWED|:WROTE]->(m:Movie) RETURN 'N1L1N2' AS scrn, 'Person' AS N1L, a.name AS N1I, a.name AS N1T, type(r) AS L1L, type(r) AS L1T, 'Movie' AS N2L, m.title AS N2T, m.title AS N2I ORDER BY N1I"
return render_template("Movies_Exp_index.html")
@app.route("/graph")
def get_graph():
db = get_db()
results = db.run(hold)
nodes = []
rels = []
i = 0
N1_id = '?'
N2_id = '?'
N3_id = '?'
id = ""
sid = ""
tid = ""
# The screen routine below, N1L1N2, is a simple,
# generalized procedure for graphing
# a returned cypher response for a node,
# through a link, to a node query, (n)-[r]->(m).
# N1 is node 1, L1 is link 1 and N2 is node 2.
# N1L, L1L, and N2L are the node and link types
# for the previous 3 items.
# N1I and N2I are identifiers for grouping togther
# returned response arrays for sorting requirements.
# In this case all the links for a paricular person (ie. name) are used
# for sorting. If you take the cypher query above,
# you can follow the logic for doing each returned
# message array.
# "id" is the node id <id>
# "sid" is the id of the start node
# "tid" is the id of the target node
# The N1T, L1T and N2T variables are text messages that are displayed on nodes and links.
# The text is under program control so it can be tailored.
for record in results:
zz = record["scrn"]
if zz == 'N1L1N2':
if N1_id != record["N1I"]:
id = i
nodes.append({"label": record["N1L"], "id" : i, "title" : record["N1T"]})
N1_id = record["N1I"]
x = i
i = i + 1
else:
N1_id = record["N1I"]
nodes.append({"label": record["N2L"], "id" : i, "title" : record["N2T"] })
N2_id = record["N2I"]
y = i
i = i + 1
sid = x
tid = y
rels.append({"source": x, "target": y, "label": record["L1L"], "title": record["L1T"], "sid" : sid, "tid" : tid})
return Response(dumps({"nodes": nodes, "links": rels}), mimetype="application/json")
app.run(host='127.0.0.1', port= 5000)
| 33.162791 | 254 | 0.605189 | import os
from py2neo import Graph
import ast
from json import dumps
from flask import Flask, render_template, g, Response, request
from neo4j import GraphDatabase, basic_auth
app = Flask(__name__)
app.debug = True
driver = GraphDatabase.driver('bolt://localhost',auth=basic_auth("neo4j", '6669'))
hold = '?'
def get_db():
if not hasattr(g, 'neo4j_db'):
g.neo4j_db = driver.session()
return g.neo4j_db
@app.route("/")
def get_index():
global hold
hold = "MATCH (a:Person)-[r:ACTED_IN|:DIRECTED|:PRODUCED|:REVIEWED|:WROTE]->(m:Movie) RETURN 'N1L1N2' AS scrn, 'Person' AS N1L, a.name AS N1I, a.name AS N1T, type(r) AS L1L, type(r) AS L1T, 'Movie' AS N2L, m.title AS N2T, m.title AS N2I ORDER BY N1I"
return render_template("Movies_Exp_index.html")
@app.route("/graph")
def get_graph():
db = get_db()
results = db.run(hold)
nodes = []
rels = []
i = 0
N1_id = '?'
N2_id = '?'
N3_id = '?'
id = ""
sid = ""
tid = ""
for record in results:
zz = record["scrn"]
if zz == 'N1L1N2':
if N1_id != record["N1I"]:
id = i
nodes.append({"label": record["N1L"], "id" : i, "title" : record["N1T"]})
N1_id = record["N1I"]
x = i
i = i + 1
else:
N1_id = record["N1I"]
nodes.append({"label": record["N2L"], "id" : i, "title" : record["N2T"] })
N2_id = record["N2I"]
y = i
i = i + 1
sid = x
tid = y
rels.append({"source": x, "target": y, "label": record["L1L"], "title": record["L1T"], "sid" : sid, "tid" : tid})
return Response(dumps({"nodes": nodes, "links": rels}), mimetype="application/json")
app.run(host='127.0.0.1', port= 5000)
| true | true |
f73b2c8d2590059b4bd26b86989a83e72697d2f9 | 6,983 | py | Python | metaphor/looker/extractor.py | MetaphorData/connectors | e195f58ee0660b278bd72d16173937fae279eef2 | [
"Apache-2.0"
] | 5 | 2021-08-24T17:46:48.000Z | 2022-01-21T03:42:31.000Z | metaphor/looker/extractor.py | MetaphorData/connectors | e195f58ee0660b278bd72d16173937fae279eef2 | [
"Apache-2.0"
] | 142 | 2021-09-02T22:16:54.000Z | 2022-03-31T15:23:34.000Z | metaphor/looker/extractor.py | MetaphorData/connectors | dd0e2092f791196dd9becce7da28923a4875370d | [
"Apache-2.0"
] | null | null | null | import os
from typing import Dict, Iterable, List, Sequence, Set, Tuple
try:
import looker_sdk
from looker_sdk.sdk.api31.methods import Looker31SDK
from looker_sdk.sdk.api31.models import DashboardElement
except ImportError:
print("Please install metaphor[looker] extra\n")
raise
from metaphor.models.metadata_change_event import (
Chart,
ChartType,
Dashboard,
DashboardInfo,
DashboardLogicalID,
DashboardPlatform,
DashboardUpstream,
MetadataChangeEvent,
VirtualViewType,
)
from metaphor.common.entity_id import to_virtual_view_entity_id
from metaphor.common.event_util import EventUtil
from metaphor.common.extractor import BaseExtractor
from metaphor.common.logger import get_logger
from metaphor.looker.config import LookerConnectionConfig, LookerRunConfig
from metaphor.looker.lookml_parser import Model, fullname, parse_project
logger = get_logger(__name__)
class LookerExtractor(BaseExtractor):
    """Looker metadata extractor: parses a LookML project and fetches dashboards
    via the Looker API, emitting dashboard and virtual-view (explore) events."""
    @staticmethod
    def config_class():
        """Return the config class this extractor expects."""
        return LookerRunConfig
    # Maps Looker visualization type strings to the generic ChartType enum;
    # anything unlisted falls back to ChartType.OTHER in _extract_charts.
    vis_type_map = {
        "looker_area": ChartType.AREA,
        "looker_bar": ChartType.BAR,
        "looker_boxplot": ChartType.BOX_PLOT,
        "looker_column": ChartType.COLUMN,
        "looker_donut_multiples": ChartType.DONUT,
        "looker_line": ChartType.LINE,
        "looker_map": ChartType.MAP,
        "looker_geo_coordinates": ChartType.MAP,
        "looker_geo_choropleth": ChartType.MAP,
        "looker_pie": ChartType.PIE,
        "looker_scatter": ChartType.SCATTER,
        "table": ChartType.TABLE,
        "looker_grid": ChartType.TABLE,
        "looker_single_record": ChartType.TABLE,
        "single_value": ChartType.TEXT,
        "text": ChartType.TEXT,
    }
    def initSdk(self, config: LookerRunConfig) -> Looker31SDK:
        """Initialize the Looker 3.1 SDK from the run config."""
        # Load config using environment variables instead from looker.ini file
        # See https://github.com/looker-open-source/sdk-codegen#environment-variable-configuration
        os.environ["LOOKERSDK_BASE_URL"] = config.base_url
        os.environ["LOOKERSDK_CLIENT_ID"] = config.client_id
        os.environ["LOOKERSDK_CLIENT_SECRET"] = config.client_secret
        os.environ["LOOKERSDK_VERIFY_SSL"] = str(config.verify_ssl)
        os.environ["LOOKERSDK_TIMEOUT"] = str(config.timeout)
        return looker_sdk.init31()
    async def extract(self, config: LookerRunConfig) -> List[MetadataChangeEvent]:
        """Parse the LookML project, fetch dashboards, and build the resulting events."""
        assert isinstance(config, LookerExtractor.config_class())
        logger.info("Fetching metadata from Looker")
        sdk = self.initSdk(config)
        # Lower case all connection names for case-insensitive lookup
        connections: Dict[str, LookerConnectionConfig] = {
            k.lower(): v for (k, v) in config.connections.items()
        }
        model_map, virtual_views = parse_project(
            config.lookml_dir, connections, config.project_source_url
        )
        dashboards = self._fetch_dashboards(config, sdk, model_map)
        dashboard_events = [EventUtil.build_dashboard_event(d) for d in dashboards]
        virtual_view_events = [
            EventUtil.build_virtual_view_event(d) for d in virtual_views
        ]
        return dashboard_events + virtual_view_events
    def _fetch_dashboards(
        self, config: LookerRunConfig, sdk: Looker31SDK, model_map: Dict[str, Model]
    ) -> List[Dashboard]:
        """Fetch every dashboard from the Looker API and convert it to a Dashboard entity."""
        dashboards: List[Dashboard] = []
        for basic_dashboard in sdk.all_dashboards():
            assert basic_dashboard.id is not None
            # all_dashboards() only returns summaries; fetch the full dashboard.
            dashboard = sdk.dashboard(dashboard_id=basic_dashboard.id)
            dashboard_info = DashboardInfo()
            dashboard_info.title = dashboard.title
            dashboard_info.description = dashboard.description
            dashboard_info.url = (
                f"{config.base_url}/{dashboard.preferred_viewer}/{dashboard.id}"
            )
            # All numeric fields must be converted to "float" to meet quicktype's expectation
            if dashboard.view_count is not None:
                dashboard_info.view_count = float(dashboard.view_count)
            dashboard_info.charts = []
            upstream = None
            if dashboard.dashboard_elements is not None:
                (dashboard_info.charts, upstream) = self._extract_charts(
                    dashboard.dashboard_elements, model_map
                )
            dashboards.append(
                Dashboard(
                    logical_id=DashboardLogicalID(
                        dashboard.id, DashboardPlatform.LOOKER
                    ),
                    dashboard_info=dashboard_info,
                    upstream=upstream,
                )
            )
        return dashboards
    def _extract_charts(
        self,
        dashboard_elements: Sequence[DashboardElement],
        model_map: Dict[str, Model],
    ) -> Tuple[List[Chart], DashboardUpstream]:
        """Build Chart entities from a dashboard's "vis" elements and collect the
        upstream explore (virtual view) ids they reference. Elements with missing
        or invalid model/explore references are logged and skipped."""
        charts = []
        explore_ids: Set[str] = set()
        for e in filter(lambda e: e.type == "vis", dashboard_elements):
            if e.result_maker is None:
                logger.warning(f"Unable to find result_maker in element {e.title}")
                continue
            chart_type = None
            if e.result_maker.vis_config is not None:
                chart_type = self.vis_type_map.get(
                    e.result_maker.vis_config.get("type", ""), ChartType.OTHER
                )
            charts.append(
                Chart(
                    # Use "id" if "title" is None or empty string
                    title=e.title if e.title else e.id,
                    description=e.note_text,
                    chart_type=chart_type,
                )
            )
            if not isinstance(e.result_maker.filterables, Iterable):
                logger.warning(f"Unable to iterate filterables in element {e.title}")
                continue
            for f in e.result_maker.filterables:
                if f.model is None or f.view is None:
                    logger.warning(f"Missing model or view in element {e.title}")
                    continue
                model = model_map.get(f.model)
                if model is None:
                    logger.error(f"Chart {e.title} references invalid model {f.model}")
                    continue
                explore = model.explores.get(f.view)
                if explore is None:
                    logger.error(f"Chart {e.title} references invalid explore {f.view}")
                    continue
                explore_ids.add(
                    str(
                        to_virtual_view_entity_id(
                            fullname(f.model, explore.name),
                            VirtualViewType.LOOKER_EXPLORE,
                        )
                    )
                )
        return (
            charts,
            DashboardUpstream(
                source_virtual_views=list(explore_ids),
            ),
        )
| 35.810256 | 98 | 0.608621 | import os
from typing import Dict, Iterable, List, Sequence, Set, Tuple
try:
import looker_sdk
from looker_sdk.sdk.api31.methods import Looker31SDK
from looker_sdk.sdk.api31.models import DashboardElement
except ImportError:
print("Please install metaphor[looker] extra\n")
raise
from metaphor.models.metadata_change_event import (
Chart,
ChartType,
Dashboard,
DashboardInfo,
DashboardLogicalID,
DashboardPlatform,
DashboardUpstream,
MetadataChangeEvent,
VirtualViewType,
)
from metaphor.common.entity_id import to_virtual_view_entity_id
from metaphor.common.event_util import EventUtil
from metaphor.common.extractor import BaseExtractor
from metaphor.common.logger import get_logger
from metaphor.looker.config import LookerConnectionConfig, LookerRunConfig
from metaphor.looker.lookml_parser import Model, fullname, parse_project
logger = get_logger(__name__)
class LookerExtractor(BaseExtractor):
@staticmethod
def config_class():
return LookerRunConfig
vis_type_map = {
"looker_area": ChartType.AREA,
"looker_bar": ChartType.BAR,
"looker_boxplot": ChartType.BOX_PLOT,
"looker_column": ChartType.COLUMN,
"looker_donut_multiples": ChartType.DONUT,
"looker_line": ChartType.LINE,
"looker_map": ChartType.MAP,
"looker_geo_coordinates": ChartType.MAP,
"looker_geo_choropleth": ChartType.MAP,
"looker_pie": ChartType.PIE,
"looker_scatter": ChartType.SCATTER,
"table": ChartType.TABLE,
"looker_grid": ChartType.TABLE,
"looker_single_record": ChartType.TABLE,
"single_value": ChartType.TEXT,
"text": ChartType.TEXT,
}
def initSdk(self, config: LookerRunConfig) -> Looker31SDK:
_URL"] = config.base_url
os.environ["LOOKERSDK_CLIENT_ID"] = config.client_id
os.environ["LOOKERSDK_CLIENT_SECRET"] = config.client_secret
os.environ["LOOKERSDK_VERIFY_SSL"] = str(config.verify_ssl)
os.environ["LOOKERSDK_TIMEOUT"] = str(config.timeout)
return looker_sdk.init31()
async def extract(self, config: LookerRunConfig) -> List[MetadataChangeEvent]:
assert isinstance(config, LookerExtractor.config_class())
logger.info("Fetching metadata from Looker")
sdk = self.initSdk(config)
connections: Dict[str, LookerConnectionConfig] = {
k.lower(): v for (k, v) in config.connections.items()
}
model_map, virtual_views = parse_project(
config.lookml_dir, connections, config.project_source_url
)
dashboards = self._fetch_dashboards(config, sdk, model_map)
dashboard_events = [EventUtil.build_dashboard_event(d) for d in dashboards]
virtual_view_events = [
EventUtil.build_virtual_view_event(d) for d in virtual_views
]
return dashboard_events + virtual_view_events
def _fetch_dashboards(
self, config: LookerRunConfig, sdk: Looker31SDK, model_map: Dict[str, Model]
) -> List[Dashboard]:
dashboards: List[Dashboard] = []
for basic_dashboard in sdk.all_dashboards():
assert basic_dashboard.id is not None
dashboard = sdk.dashboard(dashboard_id=basic_dashboard.id)
dashboard_info = DashboardInfo()
dashboard_info.title = dashboard.title
dashboard_info.description = dashboard.description
dashboard_info.url = (
f"{config.base_url}/{dashboard.preferred_viewer}/{dashboard.id}"
)
if dashboard.view_count is not None:
dashboard_info.view_count = float(dashboard.view_count)
dashboard_info.charts = []
upstream = None
if dashboard.dashboard_elements is not None:
(dashboard_info.charts, upstream) = self._extract_charts(
dashboard.dashboard_elements, model_map
)
dashboards.append(
Dashboard(
logical_id=DashboardLogicalID(
dashboard.id, DashboardPlatform.LOOKER
),
dashboard_info=dashboard_info,
upstream=upstream,
)
)
return dashboards
def _extract_charts(
self,
dashboard_elements: Sequence[DashboardElement],
model_map: Dict[str, Model],
) -> Tuple[List[Chart], DashboardUpstream]:
charts = []
explore_ids: Set[str] = set()
for e in filter(lambda e: e.type == "vis", dashboard_elements):
if e.result_maker is None:
logger.warning(f"Unable to find result_maker in element {e.title}")
continue
chart_type = None
if e.result_maker.vis_config is not None:
chart_type = self.vis_type_map.get(
e.result_maker.vis_config.get("type", ""), ChartType.OTHER
)
charts.append(
Chart(
# Use "id" if "title" is None or empty string
title=e.title if e.title else e.id,
description=e.note_text,
chart_type=chart_type,
)
)
if not isinstance(e.result_maker.filterables, Iterable):
logger.warning(f"Unable to iterate filterables in element {e.title}")
continue
for f in e.result_maker.filterables:
if f.model is None or f.view is None:
logger.warning(f"Missing model or view in element {e.title}")
continue
model = model_map.get(f.model)
if model is None:
logger.error(f"Chart {e.title} references invalid model {f.model}")
continue
explore = model.explores.get(f.view)
if explore is None:
logger.error(f"Chart {e.title} references invalid explore {f.view}")
continue
explore_ids.add(
str(
to_virtual_view_entity_id(
fullname(f.model, explore.name),
VirtualViewType.LOOKER_EXPLORE,
)
)
)
return (
charts,
DashboardUpstream(
source_virtual_views=list(explore_ids),
),
)
| true | true |
f73b2d0db43898ad6371e679c14ffbb86cb40210 | 2,100 | py | Python | find-subnets.py | hmcts/cnp-module-postgres | 7e8ce4feb3f1c1e586fd989b411eed93d7fb2bfd | [
"MIT"
] | 1 | 2019-10-02T12:50:50.000Z | 2019-10-02T12:50:50.000Z | find-subnets.py | hmcts/cnp-module-postgres | 7e8ce4feb3f1c1e586fd989b411eed93d7fb2bfd | [
"MIT"
] | 5 | 2019-05-01T16:41:13.000Z | 2021-11-12T13:50:14.000Z | find-subnets.py | hmcts/cnp-module-postgres | 7e8ce4feb3f1c1e586fd989b411eed93d7fb2bfd | [
"MIT"
] | 1 | 2021-04-10T22:50:13.000Z | 2021-04-10T22:50:13.000Z | #!/usr/bin/env python3
import sys
import json
import urllib.request
def find_subnet_rules(env, product, subnets):
    """Return semicolon-joined subnet ids and rule names for *env*/*product*.

    The result is a flat string->string dict suitable for printing as JSON.
    """
    matched = get_all_subnets(env, product, subnets)
    return {
        'subnets': ';'.join(entry['subnet_id'] for entry in matched),
        'rule_names': ';'.join(entry['rule_name'] for entry in matched),
    }
def get_all_subnets(env, product, subnets):
    """Collect the subnet entries configured for *env* plus those for *product*.

    Exits the process with status 1 when neither the environment nor the
    application has a matching entry in *subnets*.
    """
    env_matches = [entry['subnets']
                   for entry in subnets['environments'] if entry['name'] == env]
    app_matches = [entry['subnets']
                   for entry in subnets['applications'] if entry['name'] == product]
    if not env_matches and not app_matches:
        # terraform would only report "command failed with no error message";
        # failing here with our own message is still clearer.
        print('No subnets found')
        sys.exit(1)
    env_subnets = env_matches[0] if env_matches else []
    app_subnets = app_matches[0] if app_matches else []
    return env_subnets + app_subnets
# Terraform sends exactly one JSON line on stdin, e.g.
# {"env":"idam-aat","product":"idam-idm-aat", "github_token": "example"}
# (a "subnets_filename" key is also expected - see below).
line = sys.stdin.readline()
query = json.loads(line)
subnets_filename = query['subnets_filename']
github_token = query['github_token']
# Fetch the subnet whitelist JSON straight from GitHub; the bearer token is
# needed presumably because the repository is private - confirm with repo owner.
url = 'https://raw.githubusercontent.com/hmcts/cnp-database-subnet-whitelisting/master/%s' % subnets_filename
req = urllib.request.Request(
    url=url, headers={'Authorization': 'Bearer ' + github_token})
with urllib.request.urlopen(req) as f:
    subnets_str = f.read().decode('utf-8')
subnets = json.loads(subnets_str)
env = query['env']
product = query['product']
# Emit the joined subnet ids / rule names as JSON for terraform to consume.
result = find_subnet_rules(env, product, subnets)
print(json.dumps(result))
| 31.818182 | 109 | 0.682381 |
import sys
import json
import urllib.request
def find_subnet_rules(env, product, subnets):
all_subnets = get_all_subnets(env, product, subnets)
rule_names = [x['rule_name'] for x in all_subnets]
subnet_ids = [x['subnet_id'] for x in all_subnets]
result = {}
result['subnets'] = ';'.join(subnet_ids)
result['rule_names'] = ';'.join(rule_names)
return result
def get_all_subnets(env, product, subnets):
environments = subnets['environments']
env_subnets_list_of_lists = [environment['subnets']
for environment in environments if environment['name'] == env]
applications = subnets['applications']
app_subnets_list_of_lists = [application['subnets']
for application in applications if application['name'] == product]
if len(env_subnets_list_of_lists) == 0 and len(app_subnets_list_of_lists) == 0:
print('No subnets found')
sys.exit(1)
env_subnets = env_subnets_list_of_lists[0] if len(
env_subnets_list_of_lists) > 0 else []
app_subs = app_subnets_list_of_lists[0] if len(
app_subnets_list_of_lists) > 0 else []
all_subnets = env_subnets + app_subs
return all_subnets
line = sys.stdin.readline()
query = json.loads(line)
subnets_filename = query['subnets_filename']
github_token = query['github_token']
url = 'https://raw.githubusercontent.com/hmcts/cnp-database-subnet-whitelisting/master/%s' % subnets_filename
req = urllib.request.Request(
url=url, headers={'Authorization': 'Bearer ' + github_token})
with urllib.request.urlopen(req) as f:
subnets_str = f.read().decode('utf-8')
subnets = json.loads(subnets_str)
env = query['env']
product = query['product']
result = find_subnet_rules(env, product, subnets)
print(json.dumps(result))
| true | true |
f73b2d3deca443423c813c97d673a48963ca6f65 | 1,930 | py | Python | 1-99/50-59/50.py | dcragusa/LeetCode | 01c30de0832b378a1b054d80d1ea1d3f09a2abd3 | [
"MIT"
] | null | null | null | 1-99/50-59/50.py | dcragusa/LeetCode | 01c30de0832b378a1b054d80d1ea1d3f09a2abd3 | [
"MIT"
] | null | null | null | 1-99/50-59/50.py | dcragusa/LeetCode | 01c30de0832b378a1b054d80d1ea1d3f09a2abd3 | [
"MIT"
] | null | null | null | """
Implement pow(x, n), which calculates x raised to the power n (x^n).
Example 1:
Input: 2.00000, 10, Output: 1024.00000
Example 2:
Input: 2.10000, 3, Output: 9.26100
Example 3:
Input: 2.00000, -2, Output: 0.25000, Explanation: 2^-2 = 1/2^2 = 1/4 = 0.25
Note:
-100.0 < x < 100.0, n is a 32-bit signed integer, within the range [−2^31, 2^31 − 1]
"""
"""
Of course this is batteries included with Python. An alternative implementation is provided below. We cannot keep
multiplying in a range over n as n can be huge. Instead we realise that we can use binary decomposition but
representing powers instead of integers. For example bin(13) = 0b1101. We go through the binary representation backwards,
representing a power of 13 as powers of 1, 4, 8 multiplied together. The next power in the sequence can be trivially
calculated from the previous one (e.g. power of 8 = power by 4 * power by 4). We just have to watch out for
float precision constraints. Floats generally round out at 17dp so we check this after every iteration of the power
sequence and terminate early if appropriate. We also have to watch out for the negative sign if present.
"""
def my_pow(x, n):
    """Compute x**n by binary decomposition of the exponent.

    Walks the bits of abs(n) from least to most significant, squaring a running
    power each step and multiplying it into the result for every set bit.
    Bails out early with 0 or signed infinity once the running power leaves the
    ~17-significant-digit range a float can represent.
    """
    if n < 0:
        x = 1 / x
    sign = -1 if (x < 0 and n % 2) else 1
    result = 1
    square = 0
    for position, bit in enumerate(reversed(bin(abs(n))[2:])):
        # First step seeds with |x|; every later step squares the previous power.
        square = abs(x) if position == 0 else square * square
        if bit == '1':
            result *= square
        if 0 < square < 10 ** -17:
            return 0
        if square > 10 ** 17:
            return float('inf') * sign
    return result * sign
# Smoke tests run at import time; expected values are exact IEEE-754 results.
assert my_pow(10.0, 0) == 1
assert my_pow(10.0, 5) == 100000
assert my_pow(10.0, -1) == 0.1
assert my_pow(2.00000, 10) == 1024.00000
assert my_pow(2.10000, 3) == 9.261000000000001
assert my_pow(2.00000, -2) == 0.25000
assert my_pow(-2.0, 2) == 4.0
assert my_pow(-13.62608, 3) == -2529.9550389278597
| 36.415094 | 118 | 0.668394 |
def my_pow(x, n):
if n < 0:
x = 1/x
sign = -1 if (x < 0 and n % 2) else 1
res, last_power = 1, 0
for pow_2, bit in enumerate(reversed(bin(abs(n))[2:])):
power = abs(x) if not pow_2 else last_power * last_power
last_power = power
if bit == '1':
res *= power
if 0 < last_power < 10**-17:
return 0
elif last_power > 10**17:
return float('inf') * sign
return res * sign
assert my_pow(10.0, 0) == 1
assert my_pow(10.0, 5) == 100000
assert my_pow(10.0, -1) == 0.1
assert my_pow(2.00000, 10) == 1024.00000
assert my_pow(2.10000, 3) == 9.261000000000001
assert my_pow(2.00000, -2) == 0.25000
assert my_pow(-2.0, 2) == 4.0
assert my_pow(-13.62608, 3) == -2529.9550389278597
| true | true |
f73b2d57a08288cc1814ce291790dce1ed64cd3b | 37,605 | py | Python | keystone/tests/protection/v3/test_users.py | ferag/keystone | af1c1a822a8dfdd543c6e4d48264f5b8be2bdfc7 | [
"Apache-2.0"
] | null | null | null | keystone/tests/protection/v3/test_users.py | ferag/keystone | af1c1a822a8dfdd543c6e4d48264f5b8be2bdfc7 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | keystone/tests/protection/v3/test_users.py | ferag/keystone | af1c1a822a8dfdd543c6e4d48264f5b8be2bdfc7 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_serialization import jsonutils
from six.moves import http_client
from keystone.common.policies import user as up
from keystone.common import provider_api
import keystone.conf
from keystone.tests.common import auth as common_auth
from keystone.tests import unit
from keystone.tests.unit import base_classes
from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import temporaryfile
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
class _CommonUserTests(object):
    """Behavior every authenticated persona is expected to exhibit."""

    def test_user_can_get_their_own_user_reference(self):
        # Any persona, regardless of scope, may look up its own record.
        url = '/v3/users/%s' % self.user_id
        with self.test_client() as c:
            response = c.get(url, headers=self.headers)
            self.assertEqual(self.user_id, response.json['user']['id'])
class _SystemUserTests(object):
    """Read behavior shared by every system-scoped persona."""

    def test_user_can_get_other_users(self):
        other = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        with self.test_client() as c:
            response = c.get(
                '/v3/users/%s' % other['id'], headers=self.headers
            )
            self.assertEqual(other['id'], response.json['user']['id'])

    def test_user_cannot_get_non_existent_user_not_found(self):
        # System users are authorized, so a missing user surfaces as 404.
        bogus_id = uuid.uuid4().hex
        with self.test_client() as c:
            c.get(
                '/v3/users/%s' % bogus_id, headers=self.headers,
                expected_status_code=http_client.NOT_FOUND
            )

    def test_user_can_list_users(self):
        expected_user_ids = [
            PROVIDERS.identity_api.create_user(
                unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
            )['id']
            for _ in range(3)
        ]
        with self.test_client() as c:
            response = c.get('/v3/users', headers=self.headers)
            returned_user_ids = [u['id'] for u in response.json['users']]
            for user_id in expected_user_ids:
                self.assertIn(user_id, returned_user_ids)
class _SystemMemberAndReaderUserTests(object):
    """Common functionality for system readers and system members.

    Neither persona may mutate users, so every write operation must be
    rejected with 403 Forbidden.
    """

    def test_user_cannot_create_users(self):
        # NOTE(review): the payload key is 'domain' rather than 'domain_id';
        # it does not matter here because the policy check rejects the
        # request before the body is acted on.
        create = {
            'user': {
                'name': uuid.uuid4().hex,
                'domain': CONF.identity.default_domain_id
            }
        }

        with self.test_client() as c:
            c.post(
                '/v3/users', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_update_users(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )

        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_update_non_existent_user_forbidden(self):
        # BUG FIX: this test previously created a real user and patched it,
        # never exercising the nonexistent path its name promises. Use a
        # random ID instead; unauthorized callers are rejected by the policy
        # check before any existence check, so 403 is still expected
        # (matching the delete_non_existent test below).
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % uuid.uuid4().hex, json=update,
                headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_delete_users(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )

        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_delete_non_existent_user_forbidden(self):
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
class _DomainUserTests(object):
    """Common default functionality for all domain users."""

    def test_user_can_get_user_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )

        with self.test_client() as c:
            r = c.get('/v3/users/%s' % user['id'], headers=self.headers)
            self.assertEqual(user['id'], r.json['user']['id'])

    def test_user_cannot_get_user_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )

        with self.test_client() as c:
            c.get(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_can_list_users_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )

        with self.test_client() as c:
            r = c.get('/v3/users', headers=self.headers)
            # Only the persona itself and the user just created live in
            # this domain.
            self.assertEqual(2, len(r.json['users']))
            # BUG FIX: the loop variable used to shadow ``user``, so the
            # final assertion compared the last listed user's ID against
            # the list it came from (always true). Use a distinct name so
            # the created user's presence is actually verified.
            user_ids = [u['id'] for u in r.json['users']]
            self.assertIn(self.user_id, user_ids)
            self.assertIn(user['id'], user_ids)

    def test_user_cannot_list_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )

        with self.test_client() as c:
            r = c.get('/v3/users', headers=self.headers)
            user_ids = [u['id'] for u in r.json['users']]
            self.assertNotIn(user['id'], user_ids)
class _DomainMemberAndReaderUserTests(object):
    """Functionality for all domain members and domain readers.

    Neither persona may mutate users — inside or outside its domain — so
    every write operation must be rejected with 403 Forbidden.
    """

    def test_user_cannot_create_users_within_domain(self):
        create = {
            'user': {
                'domain_id': self.domain_id,
                'name': uuid.uuid4().hex
            }
        }

        with self.test_client() as c:
            c.post(
                '/v3/users', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_create_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        create = {
            'user': {
                'domain_id': domain['id'],
                'name': uuid.uuid4().hex
            }
        }

        with self.test_client() as c:
            c.post(
                '/v3/users', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_update_users_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )

        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_update_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )

        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_update_non_existent_user_forbidden(self):
        # BUG FIX: this test previously created a real user in the domain
        # and patched it, never exercising the nonexistent path its name
        # promises. Target a random ID instead; unauthorized callers are
        # rejected by the policy check before any existence check, so 403
        # is still expected (matching delete_non_existent below).
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % uuid.uuid4().hex, json=update,
                headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_delete_users_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )

        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_delete_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )

        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_delete_non_existent_user_forbidden(self):
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
class _ProjectUserTests(object):
    """Common test cases for all project users.

    Project-scoped tokens grant no access to the users API at all; every
    read and write — even within the persona's own domain — must be
    rejected with 403 Forbidden.
    """

    def test_user_cannot_get_users_within_their_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        with self.test_client() as c:
            c.get(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_get_users_in_other_domains(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )
        with self.test_client() as c:
            c.get(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_get_non_existent_user_forbidden(self):
        # Unauthorized callers see 403 even for missing users so that the
        # API does not leak which user IDs exist.
        with self.test_client() as c:
            c.get(
                '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_list_users_within_domain(self):
        with self.test_client() as c:
            c.get(
                '/v3/users?domain_id=%s' % self.domain_id,
                headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_list_users_in_other_domains(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )
        with self.test_client() as c:
            c.get(
                '/v3/users?domain_id=%s' % domain['id'],
                headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_create_users_within_domain(self):
        create = {
            'user': {
                'domain_id': self.domain_id,
                'name': uuid.uuid4().hex
            }
        }
        with self.test_client() as c:
            c.post(
                '/v3/users', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_create_users_in_other_domains(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        create = {
            'user': {
                'domain_id': domain['id'],
                'name': uuid.uuid4().hex
            }
        }
        with self.test_client() as c:
            c.post(
                '/v3/users', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_update_users_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_update_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_update_non_existent_user_forbidden(self):
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % uuid.uuid4().hex, json=update,
                headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_delete_users_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_delete_users_in_other_domains(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_delete_non_existent_user_forbidden(self):
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
class SystemReaderTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _CommonUserTests,
                        _SystemUserTests,
                        _SystemMemberAndReaderUserTests):
    """Run the shared user tests with a system-scoped reader token."""

    def setUp(self):
        super(SystemReaderTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        # Create a fresh user and grant it 'reader' on the system.
        reader_ref = unit.new_user_ref(
            domain_id=CONF.identity.default_domain_id
        )
        self.user_id = PROVIDERS.identity_api.create_user(reader_ref)['id']
        PROVIDERS.assignment_api.create_system_grant_for_user(
            self.user_id, self.bootstrapper.reader_role_id
        )

        # Authenticate as that persona and stash the token the tests use.
        auth = self.build_authentication_request(
            user_id=self.user_id, password=reader_ref['password'],
            system=True
        )
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class SystemMemberTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _CommonUserTests,
                        _SystemUserTests,
                        _SystemMemberAndReaderUserTests):
    """Run the shared user tests with a system-scoped member token."""

    def setUp(self):
        super(SystemMemberTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        # Create a fresh user and grant it 'member' on the system.
        member_ref = unit.new_user_ref(
            domain_id=CONF.identity.default_domain_id
        )
        self.user_id = PROVIDERS.identity_api.create_user(member_ref)['id']
        PROVIDERS.assignment_api.create_system_grant_for_user(
            self.user_id, self.bootstrapper.member_role_id
        )

        # Authenticate as that persona and stash the token the tests use.
        auth = self.build_authentication_request(
            user_id=self.user_id, password=member_ref['password'],
            system=True
        )
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class SystemAdminTests(base_classes.TestCaseWithBootstrap,
                       common_auth.AuthTestMixin,
                       _CommonUserTests,
                       _SystemUserTests):
    """Run the user API tests as a system administrator.

    System admins inherit the shared read assertions and additionally have
    full create/update/delete rights on users anywhere in the deployment.
    """

    def setUp(self):
        super(SystemAdminTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        # Reuse the bootstrapped deployment admin instead of creating a
        # new user.
        self.user_id = self.bootstrapper.admin_user_id
        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=self.bootstrapper.admin_password,
            system=True
        )

        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}

    def test_user_can_create_users(self):
        # BUG FIX: the payload previously used the key 'domain', which the
        # users API treats as an arbitrary extra attribute; 'domain_id' is
        # the documented attribute that actually places the user in the
        # requested domain.
        create = {
            'user': {
                'name': uuid.uuid4().hex,
                'domain_id': CONF.identity.default_domain_id
            }
        }

        with self.test_client() as c:
            c.post('/v3/users', json=create, headers=self.headers)

    def test_user_can_update_users(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )

        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers
            )

    def test_user_cannot_update_non_existent_user_not_found(self):
        # Admins are authorized, so a missing user surfaces as 404 rather
        # than the 403 unauthorized personas receive.
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % uuid.uuid4().hex, json=update,
                headers=self.headers,
                expected_status_code=http_client.NOT_FOUND
            )

    def test_user_can_delete_users(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )

        with self.test_client() as c:
            c.delete('/v3/users/%s' % user['id'], headers=self.headers)

    def test_user_cannot_delete_non_existent_user_not_found(self):
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers,
                expected_status_code=http_client.NOT_FOUND
            )
class DomainReaderTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _CommonUserTests,
                        _DomainUserTests,
                        _DomainMemberAndReaderUserTests):
    """Run the shared user tests with a domain-scoped reader token."""

    def setUp(self):
        super(DomainReaderTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        # Build a dedicated domain so the persona's view is isolated.
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']

        # Create the persona and grant it 'reader' on the domain.
        reader_ref = unit.new_user_ref(domain_id=self.domain_id)
        self.user_id = PROVIDERS.identity_api.create_user(reader_ref)['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=self.user_id,
            domain_id=self.domain_id
        )

        # Exchange the credentials for a domain-scoped token.
        auth = self.build_authentication_request(
            user_id=self.user_id, password=reader_ref['password'],
            domain_id=self.domain_id,
        )
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class DomainMemberTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _CommonUserTests,
                        _DomainUserTests,
                        _DomainMemberAndReaderUserTests):
    """Run the shared user tests with a domain-scoped member token."""

    def setUp(self):
        super(DomainMemberTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        # Build a dedicated domain so the persona's view is isolated.
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']

        # Create the persona and grant it 'member' on the domain.
        member_ref = unit.new_user_ref(domain_id=self.domain_id)
        self.user_id = PROVIDERS.identity_api.create_user(member_ref)['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.member_role_id, user_id=self.user_id,
            domain_id=self.domain_id
        )

        # Exchange the credentials for a domain-scoped token.
        auth = self.build_authentication_request(
            user_id=self.user_id, password=member_ref['password'],
            domain_id=self.domain_id
        )
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class DomainAdminTests(base_classes.TestCaseWithBootstrap,
                       common_auth.AuthTestMixin,
                       _CommonUserTests,
                       _DomainUserTests):
    """Run the user API tests as a domain administrator.

    Domain admins can manage users inside their own domain but are treated
    like any other unauthorized caller everywhere else. The default policy
    file is overridden (see ``_override_policy``) so the new scoped
    policies are exercised instead of the deprecated ones.
    """

    def setUp(self):
        super(DomainAdminTests, self).setUp()
        self.loadapp()

        # Write policies to a temp file so _override_policy can replace
        # the deprecated defaults before enforcement starts.
        self.policy_file = self.useFixture(temporaryfile.SecureTempFile())
        self.policy_file_name = self.policy_file.file_name
        self.useFixture(
            ksfixtures.Policy(
                self.config_fixture, policy_file=self.policy_file_name
            )
        )

        self._override_policy()
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        # Create a dedicated domain with an admin persona scoped to it.
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']
        domain_admin = unit.new_user_ref(domain_id=self.domain_id)
        self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.admin_role_id, user_id=self.user_id,
            domain_id=self.domain_id
        )

        auth = self.build_authentication_request(
            user_id=self.user_id, password=domain_admin['password'],
            domain_id=self.domain_id,
        )

        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}

    def _override_policy(self):
        # TODO(lbragstad): Remove this once the deprecated policies in
        # keystone.common.policies.users have been removed. This is only
        # here to make sure we test the new policies instead of the deprecated
        # ones. Oslo.policy will apply a logical OR to deprecated policies with
        # new policies to maintain compatibility and give operators a chance to
        # update permissions or update policies without breaking users. This
        # will cause these specific tests to fail since we're trying to correct
        # this broken behavior with better scope checking.
        with open(self.policy_file_name, 'w') as f:
            overridden_policies = {
                'identity:get_user': up.SYSTEM_READER_OR_DOMAIN_READER_OR_USER,
                'identity:list_users': up.SYSTEM_READER_OR_DOMAIN_READER,
                'identity:create_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
                'identity:update_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
                'identity:delete_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN
            }
            f.write(jsonutils.dumps(overridden_policies))

    def test_user_can_create_users_within_domain(self):
        create = {
            'user': {
                'domain_id': self.domain_id,
                'name': uuid.uuid4().hex
            }
        }

        with self.test_client() as c:
            c.post('/v3/users', json=create, headers=self.headers)

    def test_user_cannot_create_users_within_domain_hyphened_domain_id(self):
        # Finally, show that we can create a new user without any surprises.
        # But if we specify a 'domain-id' instead of a 'domain_id', we get a
        # Forbidden response because we fail a policy check before
        # normalization occurs.
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        create = {
            'user': {
                'domain-id': domain['id'],
                'name': uuid.uuid4().hex
            }
        }

        with self.test_client() as c:
            c.post(
                '/v3/users', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_create_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        create = {
            'user': {
                'domain_id': domain['id'],
                'name': uuid.uuid4().hex
            }
        }

        with self.test_client() as c:
            c.post(
                '/v3/users', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_can_update_users_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )

        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers
            )

    def test_user_can_update_users_within_domain_hyphened_domain_id(self):
        # If we try updating the user's 'domain_id' by specifying a
        # 'domain-id', then it'll be stored into extras rather than normalized,
        # and the user's actual 'domain_id' is not affected.
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )

        update = {'user': {'domain-id': domain['id']}}
        with self.test_client() as c:
            r = c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers
            )
            self.assertEqual(domain['id'], r.json['user']['domain-id'])
            self.assertEqual(self.domain_id, r.json['user']['domain_id'])

    def test_user_cannot_update_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )

        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_update_non_existent_user_forbidden(self):
        # Unauthorized callers see 403 rather than 404 for missing users so
        # the API does not leak which user IDs exist.
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % uuid.uuid4().hex, json=update,
                headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_can_delete_users_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )

        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % user['id'], headers=self.headers
            )

    def test_user_cannot_delete_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )

        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_delete_non_existent_user_forbidden(self):
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
class ProjectReaderTests(base_classes.TestCaseWithBootstrap,
                         common_auth.AuthTestMixin,
                         _CommonUserTests,
                         _ProjectUserTests):
    """Run the shared user tests with a project-scoped reader token."""

    def setUp(self):
        super(ProjectReaderTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        # Isolate the persona in its own domain and project.
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']
        project = unit.new_project_ref(domain_id=self.domain_id)
        project = PROVIDERS.resource_api.create_project(project['id'], project)
        self.project_id = project['id']

        # Create the persona and grant it 'reader' on the project.
        reader_ref = unit.new_user_ref(domain_id=self.domain_id)
        self.user_id = PROVIDERS.identity_api.create_user(reader_ref)['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=self.user_id,
            project_id=self.project_id
        )

        # Exchange the credentials for a project-scoped token.
        auth = self.build_authentication_request(
            user_id=self.user_id, password=reader_ref['password'],
            project_id=self.project_id,
        )
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class ProjectMemberTests(base_classes.TestCaseWithBootstrap,
                         common_auth.AuthTestMixin,
                         _CommonUserTests,
                         _ProjectUserTests):
    """Run the shared user tests with a project-scoped member token."""

    def setUp(self):
        super(ProjectMemberTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        # Isolate the persona in its own domain and project.
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']
        project = unit.new_project_ref(domain_id=self.domain_id)
        project = PROVIDERS.resource_api.create_project(project['id'], project)
        self.project_id = project['id']

        # Create the persona and grant it 'member' on the project.
        member_ref = unit.new_user_ref(domain_id=self.domain_id)
        self.user_id = PROVIDERS.identity_api.create_user(member_ref)['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.member_role_id, user_id=self.user_id,
            project_id=self.project_id
        )

        # Exchange the credentials for a project-scoped token.
        auth = self.build_authentication_request(
            user_id=self.user_id, password=member_ref['password'],
            project_id=self.project_id,
        )
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class ProjectAdminTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _CommonUserTests,
                        _ProjectUserTests):
    """Run the user API tests as a project-scoped admin.

    Under the overridden (non-deprecated) policies, even an admin with
    only a project-scoped token gets the restrictive _ProjectUserTests
    behavior: no access to the users API.
    """

    def setUp(self):
        super(ProjectAdminTests, self).setUp()
        self.loadapp()

        # Write policies to a temp file so _override_policy can replace
        # the deprecated defaults before enforcement starts.
        self.policy_file = self.useFixture(temporaryfile.SecureTempFile())
        self.policy_file_name = self.policy_file.file_name
        self.useFixture(
            ksfixtures.Policy(
                self.config_fixture, policy_file=self.policy_file_name
            )
        )
        self._override_policy()
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)

        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']

        # The persona is the bootstrapped admin, but scoped to a project.
        self.user_id = self.bootstrapper.admin_user_id
        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=self.bootstrapper.admin_password,
            project_id=self.bootstrapper.project_id
        )

        # Grab a token using the persona we're testing and prepare headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}

    def _override_policy(self):
        # TODO(lbragstad): Remove this once the deprecated policies in
        # keystone.common.policies.users have been removed. This is only
        # here to make sure we test the new policies instead of the deprecated
        # ones. Oslo.policy will OR deprecated policies with new policies to
        # maintain compatibility and give operators a chance to update
        # permissions or update policies without breaking users. This will
        # cause these specific tests to fail since we're trying to correct this
        # broken behavior with better scope checking.
        with open(self.policy_file_name, 'w') as f:
            overridden_policies = {
                'identity:get_user': up.SYSTEM_READER_OR_DOMAIN_READER_OR_USER,
                'identity:list_users': up.SYSTEM_READER_OR_DOMAIN_READER,
                'identity:create_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
                'identity:update_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
                'identity:delete_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN
            }
            f.write(jsonutils.dumps(overridden_policies))
| 37.122409 | 79 | 0.60585 |
import uuid
from oslo_serialization import jsonutils
from six.moves import http_client
from keystone.common.policies import user as up
from keystone.common import provider_api
import keystone.conf
from keystone.tests.common import auth as common_auth
from keystone.tests import unit
from keystone.tests.unit import base_classes
from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import temporaryfile
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
class _CommonUserTests(object):
    """Common default functionality for all users."""

    def test_user_can_get_their_own_user_reference(self):
        # Any persona, regardless of scope, may look up its own record.
        with self.test_client() as c:
            r = c.get('/v3/users/%s' % self.user_id, headers=self.headers)
            self.assertEqual(self.user_id, r.json['user']['id'])
class _SystemUserTests(object):
    """Common default functionality for all system users."""

    def test_user_can_get_other_users(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        with self.test_client() as c:
            r = c.get('/v3/users/%s' % user['id'], headers=self.headers)
            self.assertEqual(user['id'], r.json['user']['id'])

    def test_user_cannot_get_non_existent_user_not_found(self):
        # System users are authorized, so a missing user surfaces as 404.
        with self.test_client() as c:
            c.get(
                '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers,
                expected_status_code=http_client.NOT_FOUND
            )

    def test_user_can_list_users(self):
        # Create a handful of users and make sure they all show up in the
        # system-wide listing.
        expected_user_ids = []
        for _ in range(3):
            user = PROVIDERS.identity_api.create_user(
                unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
            )
            expected_user_ids.append(user['id'])
        with self.test_client() as c:
            r = c.get('/v3/users', headers=self.headers)
            returned_user_ids = []
            for user in r.json['users']:
                returned_user_ids.append(user['id'])
            for user_id in expected_user_ids:
                self.assertIn(user_id, returned_user_ids)
class _SystemMemberAndReaderUserTests(object):
    """Common functionality for system readers and system members.

    Neither persona may mutate users, so every write operation must be
    rejected with 403 Forbidden.
    """

    def test_user_cannot_create_users(self):
        create = {
            'user': {
                'name': uuid.uuid4().hex,
                'domain': CONF.identity.default_domain_id
            }
        }
        with self.test_client() as c:
            c.post(
                '/v3/users', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_update_users(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_update_non_existent_user_forbidden(self):
        # BUG FIX: this test previously created a real user and patched it,
        # never exercising the nonexistent path its name promises. Use a
        # random ID instead; unauthorized callers are rejected by the
        # policy check before any existence check, so 403 is still the
        # expected status (matching delete_non_existent below).
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % uuid.uuid4().hex, json=update,
                headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_delete_users(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_cannot_delete_non_existent_user_forbidden(self):
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
class _DomainUserTests(object):
    """Common default functionality for all domain users."""

    def test_user_can_get_user_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        with self.test_client() as c:
            r = c.get('/v3/users/%s' % user['id'], headers=self.headers)
            self.assertEqual(user['id'], r.json['user']['id'])

    def test_user_cannot_get_user_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )
        with self.test_client() as c:
            c.get(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )

    def test_user_can_list_users_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        with self.test_client() as c:
            r = c.get('/v3/users', headers=self.headers)
            # Only the persona itself and the user just created live in
            # this domain.
            self.assertEqual(2, len(r.json['users']))
            # BUG FIX: the loop variable used to shadow ``user``, so the
            # final assertion compared the last listed user's ID against
            # the list it came from (always true). Use a distinct name so
            # the created user's presence is actually verified.
            user_ids = [u['id'] for u in r.json['users']]
            self.assertIn(self.user_id, user_ids)
            self.assertIn(user['id'], user_ids)

    def test_user_cannot_list_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )
        with self.test_client() as c:
            r = c.get('/v3/users', headers=self.headers)
            user_ids = [u['id'] for u in r.json['users']]
            self.assertNotIn(user['id'], user_ids)
class _DomainMemberAndReaderUserTests(object):
    """Shared checks: domain member/reader tokens must not modify users.

    Every write (create/update/delete) must return 403, both inside the
    token's own domain and in unrelated domains. Host classes must set
    ``self.domain_id`` and ``self.headers`` in ``setUp``.
    """
    def test_user_cannot_create_users_within_domain(self):
        create = {
            'user': {
                'domain_id': self.domain_id,
                'name': uuid.uuid4().hex
            }
        }
        with self.test_client() as c:
            c.post(
                '/v3/users', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_create_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        create = {
            'user': {
                'domain_id': domain['id'],
                'name': uuid.uuid4().hex
            }
        }
        with self.test_client() as c:
            c.post(
                '/v3/users', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_update_users_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_update_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_update_non_existent_user_forbidden(self):
        # Policy is enforced before existence, so the caller sees 403.
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_delete_users_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_delete_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_delete_non_existent_user_forbidden(self):
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
class _ProjectUserTests(object):
    """Shared checks: project-scoped tokens may not read or modify users.

    All user CRUD and listing must return 403 for project members/readers,
    regardless of domain. Host classes must set ``self.domain_id`` and
    ``self.headers`` in ``setUp``.
    """
    def test_user_cannot_get_users_within_their_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        with self.test_client() as c:
            c.get(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_get_users_in_other_domains(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )
        with self.test_client() as c:
            c.get(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_get_non_existent_user_forbidden(self):
        # Policy check fires before existence, so 403 rather than 404.
        with self.test_client() as c:
            c.get(
                '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_list_users_within_domain(self):
        with self.test_client() as c:
            c.get(
                '/v3/users?domain_id=%s' % self.domain_id,
                headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_list_users_in_other_domains(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )
        with self.test_client() as c:
            c.get(
                '/v3/users?domain_id=%s' % domain['id'],
                headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_create_users_within_domain(self):
        create = {
            'user': {
                'domain_id': self.domain_id,
                'name': uuid.uuid4().hex
            }
        }
        with self.test_client() as c:
            c.post(
                '/v3/users', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_create_users_in_other_domains(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        create = {
            'user': {
                'domain_id': domain['id'],
                'name': uuid.uuid4().hex
            }
        }
        with self.test_client() as c:
            c.post(
                '/v3/users', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_update_users_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_update_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_update_non_existent_user_forbidden(self):
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % uuid.uuid4().hex, json=update,
                headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_delete_users_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_delete_users_in_other_domains(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_delete_non_existent_user_forbidden(self):
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
class SystemReaderTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _CommonUserTests,
                        _SystemUserTests,
                        _SystemMemberAndReaderUserTests):
    """Run the shared user-protection checks with a system reader token."""
    def setUp(self):
        super(SystemReaderTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        reader_ref = unit.new_user_ref(
            domain_id=CONF.identity.default_domain_id
        )
        self.user_id = PROVIDERS.identity_api.create_user(reader_ref)['id']
        PROVIDERS.assignment_api.create_system_grant_for_user(
            self.user_id, self.bootstrapper.reader_role_id
        )
        auth_request = self.build_authentication_request(
            user_id=self.user_id, password=reader_ref['password'],
            system=True
        )
        # Authenticate once up front and reuse the token in the headers
        # for requests we'll be making in the tests.
        with self.test_client() as client:
            response = client.post('/v3/auth/tokens', json=auth_request)
            self.token_id = response.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class SystemMemberTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _CommonUserTests,
                        _SystemUserTests,
                        _SystemMemberAndReaderUserTests):
    """Run the shared user-protection checks with a system member token."""
    def setUp(self):
        super(SystemMemberTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        system_member = unit.new_user_ref(
            domain_id=CONF.identity.default_domain_id
        )
        self.user_id = PROVIDERS.identity_api.create_user(
            system_member
        )['id']
        PROVIDERS.assignment_api.create_system_grant_for_user(
            self.user_id, self.bootstrapper.member_role_id
        )
        auth = self.build_authentication_request(
            user_id=self.user_id, password=system_member['password'],
            system=True
        )
        # Authenticate once and cache the token in self.headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class SystemAdminTests(base_classes.TestCaseWithBootstrap,
                       common_auth.AuthTestMixin,
                       _CommonUserTests,
                       _SystemUserTests):
    """System admin tokens can perform full user CRUD (404 on missing)."""
    def setUp(self):
        super(SystemAdminTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        # Reuse the bootstrapped admin instead of creating a new user.
        self.user_id = self.bootstrapper.admin_user_id
        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=self.bootstrapper.admin_password,
            system=True
        )
        # Authenticate once and cache the token in self.headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
    def test_user_can_create_users(self):
        create = {
            'user': {
                'name': uuid.uuid4().hex,
                'domain': CONF.identity.default_domain_id
            }
        }
        with self.test_client() as c:
            c.post('/v3/users', json=create, headers=self.headers)
    def test_user_can_update_users(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers
            )
    def test_user_cannot_update_non_existent_user_not_found(self):
        # Admins pass the policy check, so a missing user surfaces as 404
        # (contrast with the 403 the member/reader mixins expect).
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % uuid.uuid4().hex, json=update,
                headers=self.headers,
                expected_status_code=http_client.NOT_FOUND
            )
    def test_user_can_delete_users(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        )
        with self.test_client() as c:
            c.delete('/v3/users/%s' % user['id'], headers=self.headers)
    def test_user_cannot_delete_non_existent_user_not_found(self):
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers,
                expected_status_code=http_client.NOT_FOUND
            )
class DomainReaderTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _CommonUserTests,
                        _DomainUserTests,
                        _DomainMemberAndReaderUserTests):
    """Run the shared user-protection checks with a domain reader token."""
    def setUp(self):
        super(DomainReaderTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        new_domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = new_domain['id']
        reader_ref = unit.new_user_ref(domain_id=self.domain_id)
        self.user_id = PROVIDERS.identity_api.create_user(reader_ref)['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=self.user_id,
            domain_id=self.domain_id
        )
        auth_request = self.build_authentication_request(
            user_id=self.user_id, password=reader_ref['password'],
            domain_id=self.domain_id,
        )
        # Authenticate once up front and reuse the token in the headers
        # for requests we'll be making in the tests.
        with self.test_client() as client:
            response = client.post('/v3/auth/tokens', json=auth_request)
            self.token_id = response.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class DomainMemberTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _CommonUserTests,
                        _DomainUserTests,
                        _DomainMemberAndReaderUserTests):
    """Run the shared user-protection checks with a domain member token."""
    def setUp(self):
        super(DomainMemberTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']
        domain_user = unit.new_user_ref(domain_id=self.domain_id)
        self.user_id = PROVIDERS.identity_api.create_user(domain_user)['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.member_role_id, user_id=self.user_id,
            domain_id=self.domain_id
        )
        auth = self.build_authentication_request(
            user_id=self.user_id, password=domain_user['password'],
            domain_id=self.domain_id
        )
        # Authenticate once and cache the token in self.headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class DomainAdminTests(base_classes.TestCaseWithBootstrap,
                       common_auth.AuthTestMixin,
                       _CommonUserTests,
                       _DomainUserTests):
    """Domain admins can manage users inside their own domain only.

    Runs against an overridden policy file (see ``_override_policy``) rather
    than the in-code defaults.
    """
    def setUp(self):
        super(DomainAdminTests, self).setUp()
        self.loadapp()
        self.policy_file = self.useFixture(temporaryfile.SecureTempFile())
        self.policy_file_name = self.policy_file.file_name
        self.useFixture(
            ksfixtures.Policy(
                self.config_fixture, policy_file=self.policy_file_name
            )
        )
        self._override_policy()
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']
        domain_admin = unit.new_user_ref(domain_id=self.domain_id)
        self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.admin_role_id, user_id=self.user_id,
            domain_id=self.domain_id
        )
        auth = self.build_authentication_request(
            user_id=self.user_id, password=domain_admin['password'],
            domain_id=self.domain_id,
        )
        # Authenticate once and cache the token in self.headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
    def _override_policy(self):
        # NOTE(review): the original explanatory comment was truncated
        # upstream; these overrides appear to grant domain admins/readers
        # user CRUD rights pending better scope checking -- confirm against
        # keystone's unified policy work before relying on this wording.
        with open(self.policy_file_name, 'w') as f:
            overridden_policies = {
                'identity:get_user': up.SYSTEM_READER_OR_DOMAIN_READER_OR_USER,
                'identity:list_users': up.SYSTEM_READER_OR_DOMAIN_READER,
                'identity:create_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
                'identity:update_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
                'identity:delete_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN
            }
            f.write(jsonutils.dumps(overridden_policies))
    def test_user_can_create_users_within_domain(self):
        create = {
            'user': {
                'domain_id': self.domain_id,
                'name': uuid.uuid4().hex
            }
        }
        with self.test_client() as c:
            c.post('/v3/users', json=create, headers=self.headers)
    def test_user_cannot_create_users_within_domain_hyphened_domain_id(self):
        # Finally, show that we can create a new user without any surprises.
        # But if we specify a 'domain-id' instead of a 'domain_id', we get a
        # Forbidden response because we fail a policy check before
        # normalization occurs.
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        create = {
            'user': {
                'domain-id': domain['id'],
                'name': uuid.uuid4().hex
            }
        }
        with self.test_client() as c:
            c.post(
                '/v3/users', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_create_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        create = {
            'user': {
                'domain_id': domain['id'],
                'name': uuid.uuid4().hex
            }
        }
        with self.test_client() as c:
            c.post(
                '/v3/users', json=create, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_can_update_users_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers
            )
    def test_user_can_update_users_within_domain_hyphened_domain_id(self):
        # If we try updating the user's 'domain_id' by specifying a
        # hyphenated 'domain-id' key instead, the update succeeds but the
        # value is stored as an arbitrary extra attribute ('domain-id'),
        # and the user's actual 'domain_id' is not affected.
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        update = {'user': {'domain-id': domain['id']}}
        with self.test_client() as c:
            r = c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers
            )
            self.assertEqual(domain['id'], r.json['user']['domain-id'])
            self.assertEqual(self.domain_id, r.json['user']['domain_id'])
    def test_user_cannot_update_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % user['id'], json=update, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_update_non_existent_user_forbidden(self):
        update = {'user': {'email': uuid.uuid4().hex}}
        with self.test_client() as c:
            c.patch(
                '/v3/users/%s' % uuid.uuid4().hex, json=update,
                headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_can_delete_users_within_domain(self):
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=self.domain_id)
        )
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % user['id'], headers=self.headers
            )
    def test_user_cannot_delete_users_in_other_domain(self):
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        user = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain['id'])
        )
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % user['id'], headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
    def test_user_cannot_delete_non_existent_user_forbidden(self):
        with self.test_client() as c:
            c.delete(
                '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers,
                expected_status_code=http_client.FORBIDDEN
            )
class ProjectReaderTests(base_classes.TestCaseWithBootstrap,
                         common_auth.AuthTestMixin,
                         _CommonUserTests,
                         _ProjectUserTests):
    """Run the shared user-protection checks with a project reader token."""
    def setUp(self):
        super(ProjectReaderTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']
        project = unit.new_project_ref(domain_id=self.domain_id)
        project = PROVIDERS.resource_api.create_project(project['id'], project)
        self.project_id = project['id']
        project_reader = unit.new_user_ref(domain_id=self.domain_id)
        self.user_id = PROVIDERS.identity_api.create_user(project_reader)['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.reader_role_id, user_id=self.user_id,
            project_id=self.project_id
        )
        auth = self.build_authentication_request(
            user_id=self.user_id, password=project_reader['password'],
            project_id=self.project_id,
        )
        # Authenticate once and cache the token in self.headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class ProjectMemberTests(base_classes.TestCaseWithBootstrap,
                         common_auth.AuthTestMixin,
                         _CommonUserTests,
                         _ProjectUserTests):
    """Run the shared user-protection checks with a project member token."""
    def setUp(self):
        super(ProjectMemberTests, self).setUp()
        self.loadapp()
        self.useFixture(ksfixtures.Policy(self.config_fixture))
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']
        project = unit.new_project_ref(domain_id=self.domain_id)
        project = PROVIDERS.resource_api.create_project(project['id'], project)
        self.project_id = project['id']
        project_member = unit.new_user_ref(domain_id=self.domain_id)
        self.user_id = PROVIDERS.identity_api.create_user(project_member)['id']
        PROVIDERS.assignment_api.create_grant(
            self.bootstrapper.member_role_id, user_id=self.user_id,
            project_id=self.project_id
        )
        auth = self.build_authentication_request(
            user_id=self.user_id, password=project_member['password'],
            project_id=self.project_id,
        )
        # Authenticate once and cache the token in self.headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
class ProjectAdminTests(base_classes.TestCaseWithBootstrap,
                        common_auth.AuthTestMixin,
                        _CommonUserTests,
                        _ProjectUserTests):
    """Project admin tokens are still forbidden from user management.

    Uses the overridden policy file from ``_override_policy`` and the
    bootstrapped admin scoped to the bootstrap project.
    """
    def setUp(self):
        super(ProjectAdminTests, self).setUp()
        self.loadapp()
        self.policy_file = self.useFixture(temporaryfile.SecureTempFile())
        self.policy_file_name = self.policy_file.file_name
        self.useFixture(
            ksfixtures.Policy(
                self.config_fixture, policy_file=self.policy_file_name
            )
        )
        self._override_policy()
        self.config_fixture.config(group='oslo_policy', enforce_scope=True)
        domain = PROVIDERS.resource_api.create_domain(
            uuid.uuid4().hex, unit.new_domain_ref()
        )
        self.domain_id = domain['id']
        self.user_id = self.bootstrapper.admin_user_id
        auth = self.build_authentication_request(
            user_id=self.user_id,
            password=self.bootstrapper.admin_password,
            project_id=self.bootstrapper.project_id
        )
        # Authenticate once and cache the token in self.headers
        # for requests we'll be making in the tests.
        with self.test_client() as c:
            r = c.post('/v3/auth/tokens', json=auth)
            self.token_id = r.headers['X-Subject-Token']
            self.headers = {'X-Auth-Token': self.token_id}
    def _override_policy(self):
        # NOTE(review): the original explanatory comment was truncated
        # upstream; these overrides mirror DomainAdminTests._override_policy
        # pending better scope checking -- confirm before relying on this
        # wording.
        with open(self.policy_file_name, 'w') as f:
            overridden_policies = {
                'identity:get_user': up.SYSTEM_READER_OR_DOMAIN_READER_OR_USER,
                'identity:list_users': up.SYSTEM_READER_OR_DOMAIN_READER,
                'identity:create_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
                'identity:update_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN,
                'identity:delete_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN
            }
            f.write(jsonutils.dumps(overridden_policies))
| true | true |
f73b2d639d0e65b873732c9f73125c0faa298413 | 1,867 | py | Python | hooks/post_gen_project.py | Everyday-Future/cookiecutter-devops | 316948eed75c95ac5768a1bf1d504807069aaa6a | [
"BSD-3-Clause"
] | null | null | null | hooks/post_gen_project.py | Everyday-Future/cookiecutter-devops | 316948eed75c95ac5768a1bf1d504807069aaa6a | [
"BSD-3-Clause"
] | null | null | null | hooks/post_gen_project.py | Everyday-Future/cookiecutter-devops | 316948eed75c95ac5768a1bf1d504807069aaa6a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import stat
# Root of the freshly generated project (cookiecutter runs hooks there).
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)


def remove_file(filepath):
    """Delete *filepath*, resolved relative to the generated project root."""
    os.remove(os.path.join(PROJECT_DIRECTORY, filepath))


if __name__ == '__main__':
    # Drop the CLI module unless a command-line interface was requested.
    if 'no' in '{{ cookiecutter.command_line_interface|lower }}':
        cli_file = os.path.join('{{ cookiecutter.project_slug }}', 'cli.py')
        remove_file(cli_file)
    if 'Not open source' == '{{ cookiecutter.open_source_license }}':
        remove_file('LICENSE')
    # Create secret envs: rename the *.txt placeholders to real env files.
    os.rename(os.path.join(PROJECT_DIRECTORY, 'secret--template.env.txt'),
              os.path.join(PROJECT_DIRECTORY, 'secret--template.env'))
    os.rename(os.path.join(PROJECT_DIRECTORY, 'secret--template-values.env.txt'),
              os.path.join(PROJECT_DIRECTORY, 'secret--template-values.env'))
    os.rename(os.path.join(PROJECT_DIRECTORY, 'frontend', '.env.txt'),
              os.path.join(PROJECT_DIRECTORY, 'frontend', '.env'))
    os.rename(os.path.join(PROJECT_DIRECTORY, 'frontend', 'docker.env.txt'),
              os.path.join(PROJECT_DIRECTORY, 'frontend', 'docker.env'))
    # Normalize shell scripts to LF line endings (CRLF breaks them on Unix).
    # BUG FIX: the previous version read and wrote in default text mode, so
    # universal newlines already turned '\r\n' into '\n' before the replace
    # (a no-op) and the write re-emitted platform endings (CRLF on Windows).
    # newline='' preserves the original endings on read and newline='\n'
    # forces LF on write.
    shell_scripts = [os.path.join(PROJECT_DIRECTORY, '.__run_cli.sh'),
                     os.path.join(PROJECT_DIRECTORY, 'boot.sh'),
                     os.path.join(PROJECT_DIRECTORY, 'host', 'test_loop.sh')]
    for shell_script in shell_scripts:
        with open(shell_script, "r", newline='') as fin:
            lines = [line.replace('\r\n', '\n') for line in fin]
        with open(shell_script, "w", newline='\n') as fout:
            fout.writelines(lines)
    # Make shell scripts executable for user, group, and others.
    for shell_script in shell_scripts:
        st = os.stat(shell_script)
        os.chmod(shell_script, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
| 38.102041 | 87 | 0.637386 |
import os
import stat
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)
def remove_file(filepath):
os.remove(os.path.join(PROJECT_DIRECTORY, filepath))
if __name__ == '__main__':
if 'no' in '{{ cookiecutter.command_line_interface|lower }}':
cli_file = os.path.join('{{ cookiecutter.project_slug }}', 'cli.py')
remove_file(cli_file)
if 'Not open source' == '{{ cookiecutter.open_source_license }}':
remove_file('LICENSE')
os.rename(os.path.join(PROJECT_DIRECTORY, 'secret--template.env.txt'),
os.path.join(PROJECT_DIRECTORY, 'secret--template.env'))
os.rename(os.path.join(PROJECT_DIRECTORY, 'secret--template-values.env.txt'),
os.path.join(PROJECT_DIRECTORY, 'secret--template-values.env'))
os.rename(os.path.join(PROJECT_DIRECTORY, 'frontend', '.env.txt'),
os.path.join(PROJECT_DIRECTORY, 'frontend', '.env'))
os.rename(os.path.join(PROJECT_DIRECTORY, 'frontend', 'docker.env.txt'),
os.path.join(PROJECT_DIRECTORY, 'frontend', 'docker.env'))
shell_scripts = [os.path.join(PROJECT_DIRECTORY, '.__run_cli.sh'),
os.path.join(PROJECT_DIRECTORY, 'boot.sh'),
os.path.join(PROJECT_DIRECTORY, 'host', 'test_loop.sh')]
for shell_script in shell_scripts:
with open(shell_script, "r") as fin:
lines = []
for line in fin:
lines.append(line.replace('\r\n', '\n'))
with open(shell_script, "w") as fout:
for line in lines:
fout.write(line)
for shell_script in shell_scripts:
st = os.stat(shell_script)
os.chmod(shell_script, st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
| true | true |
f73b2fa14400f6635610f8c0917767fe75628726 | 14,043 | py | Python | bin/workshop_check.py | mesfind/2018-06-07-Ethiopia | fcd1cff277a84e03b449df6674a966c862f83682 | [
"CC-BY-4.0"
] | null | null | null | bin/workshop_check.py | mesfind/2018-06-07-Ethiopia | fcd1cff277a84e03b449df6674a966c862f83682 | [
"CC-BY-4.0"
] | null | null | null | bin/workshop_check.py | mesfind/2018-06-07-Ethiopia | fcd1cff277a84e03b449df6674a966c862f83682 | [
"CC-BY-4.0"
] | null | null | null | #!/usr/bin/env python
'''Check that a workshop's index.html metadata is valid. See the
docstrings on the checking functions for a summary of the checks.
'''
from __future__ import print_function

import functools
import os
import re
import sys
from datetime import date

from util import Reporter, split_metadata, load_yaml, check_unwanted_files
# Metadata field patterns.
# EMAIL_PATTERN is a deliberately loose "something@something.tld" check;
# HUMANTIME_PATTERN accepts both 12-hour (am/pm) and 24-hour ranges joined
# by '-' or 'to', with all spaces removed before matching.
EMAIL_PATTERN = r'[^@]+@[^@]+\.[^@]+'
HUMANTIME_PATTERN = r'((0?[1-9]|1[0-2]):[0-5]\d(am|pm)(-|to)(0?[1-9]|1[0-2]):[0-5]\d(am|pm))|((0?\d|1\d|2[0-3]):[0-5]\d(-|to)(0?\d|1\d|2[0-3]):[0-5]\d)'
EVENTBRITE_PATTERN = r'\d{9,10}'
URL_PATTERN = r'https?://.+'
# Defaults.
CARPENTRIES = ("dc", "swc")
DEFAULT_CONTACT_EMAIL = 'admin@software-carpentry.org'
USAGE = 'Usage: "check-workshop path/to/root/directory"'
# Country and language codes. Note that codes mean different things: 'ar'
# is 'Arabic' as a language but 'Argentina' as a country.
ISO_COUNTRY = [
    'ad', 'ae', 'af', 'ag', 'ai', 'al', 'am', 'an', 'ao', 'aq', 'ar', 'as',
    'at', 'au', 'aw', 'ax', 'az', 'ba', 'bb', 'bd', 'be', 'bf', 'bg', 'bh',
    'bi', 'bj', 'bm', 'bn', 'bo', 'br', 'bs', 'bt', 'bv', 'bw', 'by', 'bz',
    'ca', 'cc', 'cd', 'cf', 'cg', 'ch', 'ci', 'ck', 'cl', 'cm', 'cn', 'co',
    'cr', 'cu', 'cv', 'cx', 'cy', 'cz', 'de', 'dj', 'dk', 'dm', 'do', 'dz',
    'ec', 'ee', 'eg', 'eh', 'er', 'es', 'et', 'eu', 'fi', 'fj', 'fk', 'fm',
    'fo', 'fr', 'ga', 'gb', 'gd', 'ge', 'gf', 'gg', 'gh', 'gi', 'gl', 'gm',
    'gn', 'gp', 'gq', 'gr', 'gs', 'gt', 'gu', 'gw', 'gy', 'hk', 'hm', 'hn',
    'hr', 'ht', 'hu', 'id', 'ie', 'il', 'im', 'in', 'io', 'iq', 'ir', 'is',
    'it', 'je', 'jm', 'jo', 'jp', 'ke', 'kg', 'kh', 'ki', 'km', 'kn', 'kp',
    'kr', 'kw', 'ky', 'kz', 'la', 'lb', 'lc', 'li', 'lk', 'lr', 'ls', 'lt',
    'lu', 'lv', 'ly', 'ma', 'mc', 'md', 'me', 'mg', 'mh', 'mk', 'ml', 'mm',
    'mn', 'mo', 'mp', 'mq', 'mr', 'ms', 'mt', 'mu', 'mv', 'mw', 'mx', 'my',
    'mz', 'na', 'nc', 'ne', 'nf', 'ng', 'ni', 'nl', 'no', 'np', 'nr', 'nu',
    'nz', 'om', 'pa', 'pe', 'pf', 'pg', 'ph', 'pk', 'pl', 'pm', 'pn', 'pr',
    'ps', 'pt', 'pw', 'py', 'qa', 're', 'ro', 'rs', 'ru', 'rw', 'sa', 'sb',
    'sc', 'sd', 'se', 'sg', 'sh', 'si', 'sj', 'sk', 'sl', 'sm', 'sn', 'so',
    'sr', 'st', 'sv', 'sy', 'sz', 'tc', 'td', 'tf', 'tg', 'th', 'tj', 'tk',
    'tl', 'tm', 'tn', 'to', 'tr', 'tt', 'tv', 'tw', 'tz', 'ua', 'ug', 'um',
    'us', 'uy', 'uz', 'va', 'vc', 've', 'vg', 'vi', 'vn', 'vu', 'wf', 'ws',
    'ye', 'yt', 'za', 'zm', 'zw'
]
# ISO-639-1 two-letter language codes.
ISO_LANGUAGE = [
    'aa', 'ab', 'ae', 'af', 'ak', 'am', 'an', 'ar', 'as', 'av', 'ay', 'az',
    'ba', 'be', 'bg', 'bh', 'bi', 'bm', 'bn', 'bo', 'br', 'bs', 'ca', 'ce',
    'ch', 'co', 'cr', 'cs', 'cu', 'cv', 'cy', 'da', 'de', 'dv', 'dz', 'ee',
    'el', 'en', 'eo', 'es', 'et', 'eu', 'fa', 'ff', 'fi', 'fj', 'fo', 'fr',
    'fy', 'ga', 'gd', 'gl', 'gn', 'gu', 'gv', 'ha', 'he', 'hi', 'ho', 'hr',
    'ht', 'hu', 'hy', 'hz', 'ia', 'id', 'ie', 'ig', 'ii', 'ik', 'io', 'is',
    'it', 'iu', 'ja', 'jv', 'ka', 'kg', 'ki', 'kj', 'kk', 'kl', 'km', 'kn',
    'ko', 'kr', 'ks', 'ku', 'kv', 'kw', 'ky', 'la', 'lb', 'lg', 'li', 'ln',
    'lo', 'lt', 'lu', 'lv', 'mg', 'mh', 'mi', 'mk', 'ml', 'mn', 'mr', 'ms',
    'mt', 'my', 'na', 'nb', 'nd', 'ne', 'ng', 'nl', 'nn', 'no', 'nr', 'nv',
    'ny', 'oc', 'oj', 'om', 'or', 'os', 'pa', 'pi', 'pl', 'ps', 'pt', 'qu',
    'rm', 'rn', 'ro', 'ru', 'rw', 'sa', 'sc', 'sd', 'se', 'sg', 'si', 'sk',
    'sl', 'sm', 'sn', 'so', 'sq', 'sr', 'ss', 'st', 'su', 'sv', 'sw', 'ta',
    'te', 'tg', 'th', 'ti', 'tk', 'tl', 'tn', 'to', 'tr', 'ts', 'tt', 'tw',
    'ty', 'ug', 'uk', 'ur', 'uz', 've', 'vi', 'vo', 'wa', 'wo', 'xh', 'yi',
    'yo', 'za', 'zh', 'zu'
]
def look_for_fixme(func):
    """Decorator: make a check fail when its argument starts with "FIXME".

    Non-string and ``None`` arguments fall through to the wrapped check
    unchanged; leading whitespace before "FIXME" is ignored.
    """
    # functools.wraps preserves the wrapped check's __name__/__doc__, which
    # the plain inner wrapper previously clobbered (every decorated check
    # reported as 'inner' in diagnostics/introspection).
    @functools.wraps(func)
    def inner(arg):
        if (arg is not None) and \
           isinstance(arg, str) and \
           arg.lstrip().startswith('FIXME'):
            return False
        return func(arg)
    return inner
@look_for_fixme
def check_layout(layout):
    '''"layout" in YAML header must be "workshop".'''
    # Exactly one value is accepted; anything else fails the check.
    return 'workshop' == layout
@look_for_fixme
def check_carpentry(layout):
    '''"carpentry" in YAML header must be "dc" or "swc".'''
    # CARPENTRIES is the module-level tuple of recognized organizations.
    is_known = layout in CARPENTRIES
    return is_known
@look_for_fixme
def check_country(country):
    '''"country" must be a lowercase ISO-3166 two-letter code.'''
    # Membership test against the module-level ISO-3166 table.
    is_known = country in ISO_COUNTRY
    return is_known
@look_for_fixme
def check_language(language):
    '''"language" must be a lowercase ISO-639 two-letter code.'''
    # Membership test against the module-level ISO-639 table.
    is_known = language in ISO_LANGUAGE
    return is_known
@look_for_fixme
def check_humandate(date):
    """
    'humandate' must be a human-readable date with a 3-letter month
    and 4-digit year. Examples include 'Feb 18-20, 2025' and 'Feb 18
    and 20, 2025'. It may be in languages other than English, but the
    month name should be kept short to aid formatting of the main
    Software Carpentry web site.
    """
    if ',' not in date:
        return False
    # BUG FIX: a value with more than one comma previously raised an
    # uncaught ValueError on tuple unpacking; reject it instead.
    parts = date.split(',')
    if len(parts) != 2:
        return False
    month_dates, year = parts
    # The first three characters of month_dates are the month and must
    # contain no spaces.
    month = month_dates[:3]
    if len(month) < 3 or any(char == ' ' for char in month):
        return False
    # But the fourth character must be a space ("February" is illegal).
    # BUG FIX: guard the index so a too-short value (e.g. 'Feb, 2025')
    # returns False instead of raising IndexError.
    if len(month_dates) < 4 or month_dates[3] != ' ':
        return False
    # year must contain *only* numbers (surrounding whitespace is fine).
    # BUG FIX: catch ValueError specifically instead of a bare except.
    try:
        int(year)
    except ValueError:
        return False
    return True
@look_for_fixme
def check_humantime(time):
    """
    'humantime' is a human-readable start and end time for the
    workshop, such as '09:00 - 16:00'.
    """
    # Spaces are stripped before matching so that '09:00 - 16:00' and
    # '09:00-16:00' are treated identically.
    compact = time.replace(' ', '')
    return re.match(HUMANTIME_PATTERN, compact) is not None
def check_date(this_date):
    """
    'startdate' and 'enddate' are machine-readable start and end dates
    for the workshop in YYYY-MM-DD format, e.g. '2015-07-01'.
    """
    # PyYAML turns well-formed YYYY-MM-DD values into datetime.date
    # instances, so a type check is sufficient here.
    return isinstance(this_date, date)
@look_for_fixme
def check_latitude_longitude(latlng):
    """
    'latlng' must be a valid latitude and longitude represented as two
    floating-point numbers separated by a comma.
    """
    try:
        lat_text, lng_text = latlng.split(',')
        latitude = float(lat_text)
        longitude = float(lng_text)
    except ValueError:
        # Wrong number of commas or non-numeric parts.
        return False
    return (-90.0 <= latitude <= 90.0) and (-180.0 <= longitude <= 180.0)
def check_instructors(instructors):
    """
    'instructor' must be a non-empty comma-separated list of quoted
    names, e.g. ['First name', 'Second name', ...']. Do not use 'TBD'
    or other placeholders.
    """
    # YAML parses bracketed name lists into Python lists; anything else
    # (a bare string, None, ...) is rejected, as is an empty list.
    return bool(instructors) and isinstance(instructors, list)
def check_helpers(helpers):
    """
    'helper' must be a comma-separated list of quoted names,
    e.g. ['First name', 'Second name', ...']. The list may be empty.
    Do not use 'TBD' or other placeholders.
    """
    # The original also tested len(helpers) >= 0, which is always true,
    # so the type check alone decides the result.
    return isinstance(helpers, list)
@look_for_fixme
def check_emails(emails):
    """
    'contact' must be a comma-separated list of valid email addresses.
    The list may be empty. A valid email address consists of characters,
    an '@', and more characters. It should not contain the default contact
    email address 'admin@software-carpentry.org'.
    """
    # YAML automatically loads list-like strings as lists.
    if not isinstance(emails, list):
        return False
    for email in emails:
        if email == DEFAULT_CONTACT_EMAIL:
            return False
        if re.match(EMAIL_PATTERN, email) is None:
            return False
    return True
def check_eventbrite(eventbrite):
    """
    'eventbrite' (the Eventbrite registration key) must be 9 or more
    digits. It may appear as an integer or as a string.
    """
    # YAML may hand the key over either as an int or as a digit string.
    if isinstance(eventbrite, int):
        return True
    return re.match(EVENTBRITE_PATTERN, eventbrite) is not None
@look_for_fixme
def check_collaborative_notes(collaborative_notes):
    """
    'collaborative_notes' must be a valid URL.
    """
    match = re.match(URL_PATTERN, collaborative_notes)
    return match is not None
@look_for_fixme
def check_pass(value):
    """
    This test always passes (it is used for 'checking' things like the
    workshop address, for which no sensible validation is feasible).
    """
    # Deliberately unconditional: presence of the key is all we require.
    return True
# Map each YAML header key to a (required, validator, error message) triple.
# FIX: the 'speaker' entry referenced the undefined name 'check_helper';
# the validator defined above is called 'check_helpers', so the old code
# raised NameError as soon as this module was imported.
HANDLERS = {
    'layout': (True, check_layout, 'layout isn\'t "workshop"'),
    'carpentry': (True, check_carpentry, 'carpentry isn\'t in ' +
                  ', '.join(CARPENTRIES)),
    'country': (True, check_country,
                'country invalid: must use lowercase two-letter ISO code ' +
                'from ' + ', '.join(ISO_COUNTRY)),
    'language': (False, check_language,
                 'language invalid: must use lowercase two-letter ISO code' +
                 ' from ' + ', '.join(ISO_LANGUAGE)),
    'humandate': (True, check_humandate,
                  'humandate invalid. Please use three-letter months like ' +
                  '"Jan" and four-letter years like "2025"'),
    'humantime': (True, check_humantime,
                  'humantime doesn\'t include numbers'),
    'startdate': (True, check_date,
                  'startdate invalid. Must be of format year-month-day, ' +
                  'i.e., 2014-01-31'),
    'enddate': (False, check_date,
                'enddate invalid. Must be of format year-month-day, i.e.,' +
                ' 2014-01-31'),
    'latlng': (True, check_latitude_longitude,
               'latlng invalid. Check that it is two floating point ' +
               'numbers, separated by a comma'),
    'organizer': (True, check_instructors,
                  'instructor list isn\'t a valid list of format ' +
                  '["First instructor", "Second instructor",..]'),
    'speaker': (True, check_helpers,
                'helper list isn\'t a valid list of format ' +
                '["First helper", "Second helper",..]'),
    'contact': (True, check_emails,
                'contact email list isn\'t a valid list of format ' +
                '["me@example.org", "you@example.org",..] or contains incorrectly formatted email addresses or ' +
                '"{0}".'.format(DEFAULT_CONTACT_EMAIL)),
    'eventbrite': (False, check_eventbrite, 'Eventbrite key appears invalid'),
    'collaborative_notes': (False, check_collaborative_notes, 'Collaborative Notes URL appears invalid'),
    'venue': (False, check_pass, 'venue name not specified'),
    'address': (False, check_pass, 'address not specified')
}
# Partition the header keys into required and optional sets, driven by
# the boolean "required" flag in each HANDLERS entry.
REQUIRED = {key for key, spec in HANDLERS.items() if spec[0]}
OPTIONAL = {key for key, spec in HANDLERS.items() if not spec[0]}
def check_blank_lines(reporter, raw):
    """
    Blank lines are not allowed in category headers.
    """
    blanks = [(number, line)
              for (number, line) in enumerate(raw.strip().split('\n'))
              if not line.strip()]
    details = ', '.join('{0}: {1}'.format(number, line.rstrip())
                        for (number, line) in blanks)
    reporter.check(not blanks,
                   None,
                   'Blank line(s) in header: {0}',
                   details)
def check_categories(reporter, left, right, msg):
    """
    Report differences (if any) between two sets of categories.
    """
    missing = sorted(left - right)
    reporter.check(not missing,
                   None,
                   '{0}: offending entries {1}',
                   msg, missing)
def check_file(reporter, path, data):
    """
    Get header from file, call all other functions, and check file for
    validity.
    """
    # Split the page into its raw header text, the parsed YAML header,
    # and the page body.
    raw, header, body = split_metadata(path, data)
    # Blank lines are not allowed inside the header.
    check_blank_lines(reporter, raw)
    # Validate every known category: a category present in the header is
    # checked when it is required or carries an actual value (as opposed
    # to a commented-out entry); a required category that is absent is
    # reported as an error.
    for category, (required, handler, message) in HANDLERS.items():
        if category not in header:
            if required:
                reporter.add(None,
                             'Missing mandatory key "{0}"',
                             category)
            continue
        value = header[category]
        if required or value:
            reporter.check(handler(value),
                           None,
                           '{0}\n actual value "{1}"',
                           message, value)
    # Flag categories that are missing outright or not recognized at all.
    seen_categories = set(header)
    check_categories(reporter, REQUIRED, seen_categories,
                     'Missing categories')
    check_categories(reporter, seen_categories, REQUIRED.union(OPTIONAL),
                     'Superfluous categories')
def check_config(reporter, filename):
    """
    Check YAML configuration file.
    """
    config = load_yaml(filename)
    # The site must describe a workshop run by one of the Carpentries.
    event_kind = config.get('kind', None)
    reporter.check(event_kind == 'workshop',
                   filename,
                   'Missing or unknown kind of event: {0}',
                   event_kind)
    carpentry_kind = config.get('carpentry', None)
    reporter.check(carpentry_kind in ('swc', 'dc'),
                   filename,
                   'Missing or unknown carpentry: {0}',
                   carpentry_kind)
def main():
    """Run as the main program."""
    # Expect exactly one argument: the workshop website's root directory.
    if len(sys.argv) != 2:
        print(USAGE, file=sys.stderr)
        sys.exit(1)
    root_dir = sys.argv[1]
    index_file = os.path.join(root_dir, 'index.html')
    config_file = os.path.join(root_dir, '_config.yml')
    reporter = Reporter()
    # Validate _config.yml, then the directory contents, then index.html.
    check_config(reporter, config_file)
    check_unwanted_files(root_dir, reporter)
    with open(index_file) as index_handle:
        page = index_handle.read()
    check_file(reporter, index_file, page)
    reporter.report()


if __name__ == '__main__':
    main()
| 33.356295 | 152 | 0.553087 |
from __future__ import print_function
import sys
import os
import re
from datetime import date
from util import Reporter, split_metadata, load_yaml, check_unwanted_files
EMAIL_PATTERN = r'[^@]+@[^@]+\.[^@]+'
HUMANTIME_PATTERN = r'((0?[1-9]|1[0-2]):[0-5]\d(am|pm)(-|to)(0?[1-9]|1[0-2]):[0-5]\d(am|pm))|((0?\d|1\d|2[0-3]):[0-5]\d(-|to)(0?\d|1\d|2[0-3]):[0-5]\d)'
EVENTBRITE_PATTERN = r'\d{9,10}'
URL_PATTERN = r'https?://.+'
CARPENTRIES = ("dc", "swc")
DEFAULT_CONTACT_EMAIL = 'admin@software-carpentry.org'
USAGE = 'Usage: "check-workshop path/to/root/directory"'
ISO_COUNTRY = [
'ad', 'ae', 'af', 'ag', 'ai', 'al', 'am', 'an', 'ao', 'aq', 'ar', 'as',
'at', 'au', 'aw', 'ax', 'az', 'ba', 'bb', 'bd', 'be', 'bf', 'bg', 'bh',
'bi', 'bj', 'bm', 'bn', 'bo', 'br', 'bs', 'bt', 'bv', 'bw', 'by', 'bz',
'ca', 'cc', 'cd', 'cf', 'cg', 'ch', 'ci', 'ck', 'cl', 'cm', 'cn', 'co',
'cr', 'cu', 'cv', 'cx', 'cy', 'cz', 'de', 'dj', 'dk', 'dm', 'do', 'dz',
'ec', 'ee', 'eg', 'eh', 'er', 'es', 'et', 'eu', 'fi', 'fj', 'fk', 'fm',
'fo', 'fr', 'ga', 'gb', 'gd', 'ge', 'gf', 'gg', 'gh', 'gi', 'gl', 'gm',
'gn', 'gp', 'gq', 'gr', 'gs', 'gt', 'gu', 'gw', 'gy', 'hk', 'hm', 'hn',
'hr', 'ht', 'hu', 'id', 'ie', 'il', 'im', 'in', 'io', 'iq', 'ir', 'is',
'it', 'je', 'jm', 'jo', 'jp', 'ke', 'kg', 'kh', 'ki', 'km', 'kn', 'kp',
'kr', 'kw', 'ky', 'kz', 'la', 'lb', 'lc', 'li', 'lk', 'lr', 'ls', 'lt',
'lu', 'lv', 'ly', 'ma', 'mc', 'md', 'me', 'mg', 'mh', 'mk', 'ml', 'mm',
'mn', 'mo', 'mp', 'mq', 'mr', 'ms', 'mt', 'mu', 'mv', 'mw', 'mx', 'my',
'mz', 'na', 'nc', 'ne', 'nf', 'ng', 'ni', 'nl', 'no', 'np', 'nr', 'nu',
'nz', 'om', 'pa', 'pe', 'pf', 'pg', 'ph', 'pk', 'pl', 'pm', 'pn', 'pr',
'ps', 'pt', 'pw', 'py', 'qa', 're', 'ro', 'rs', 'ru', 'rw', 'sa', 'sb',
'sc', 'sd', 'se', 'sg', 'sh', 'si', 'sj', 'sk', 'sl', 'sm', 'sn', 'so',
'sr', 'st', 'sv', 'sy', 'sz', 'tc', 'td', 'tf', 'tg', 'th', 'tj', 'tk',
'tl', 'tm', 'tn', 'to', 'tr', 'tt', 'tv', 'tw', 'tz', 'ua', 'ug', 'um',
'us', 'uy', 'uz', 'va', 'vc', 've', 'vg', 'vi', 'vn', 'vu', 'wf', 'ws',
'ye', 'yt', 'za', 'zm', 'zw'
]
ISO_LANGUAGE = [
'aa', 'ab', 'ae', 'af', 'ak', 'am', 'an', 'ar', 'as', 'av', 'ay', 'az',
'ba', 'be', 'bg', 'bh', 'bi', 'bm', 'bn', 'bo', 'br', 'bs', 'ca', 'ce',
'ch', 'co', 'cr', 'cs', 'cu', 'cv', 'cy', 'da', 'de', 'dv', 'dz', 'ee',
'el', 'en', 'eo', 'es', 'et', 'eu', 'fa', 'ff', 'fi', 'fj', 'fo', 'fr',
'fy', 'ga', 'gd', 'gl', 'gn', 'gu', 'gv', 'ha', 'he', 'hi', 'ho', 'hr',
'ht', 'hu', 'hy', 'hz', 'ia', 'id', 'ie', 'ig', 'ii', 'ik', 'io', 'is',
'it', 'iu', 'ja', 'jv', 'ka', 'kg', 'ki', 'kj', 'kk', 'kl', 'km', 'kn',
'ko', 'kr', 'ks', 'ku', 'kv', 'kw', 'ky', 'la', 'lb', 'lg', 'li', 'ln',
'lo', 'lt', 'lu', 'lv', 'mg', 'mh', 'mi', 'mk', 'ml', 'mn', 'mr', 'ms',
'mt', 'my', 'na', 'nb', 'nd', 'ne', 'ng', 'nl', 'nn', 'no', 'nr', 'nv',
'ny', 'oc', 'oj', 'om', 'or', 'os', 'pa', 'pi', 'pl', 'ps', 'pt', 'qu',
'rm', 'rn', 'ro', 'ru', 'rw', 'sa', 'sc', 'sd', 'se', 'sg', 'si', 'sk',
'sl', 'sm', 'sn', 'so', 'sq', 'sr', 'ss', 'st', 'su', 'sv', 'sw', 'ta',
'te', 'tg', 'th', 'ti', 'tk', 'tl', 'tn', 'to', 'tr', 'ts', 'tt', 'tw',
'ty', 'ug', 'uk', 'ur', 'uz', 've', 'vi', 'vo', 'wa', 'wo', 'xh', 'yi',
'yo', 'za', 'zh', 'zu'
]
def look_for_fixme(func):
    """Decorator: fail the wrapped check when its text argument is an
    unfilled "FIXME" placeholder."""
    def inner(arg):
        if (arg is not None) and \
           isinstance(arg, str) and \
           arg.lstrip().startswith('FIXME'):
            return False
        return func(arg)
    return inner
@look_for_fixme
def check_layout(layout):
    """"layout" in the YAML header must be "workshop"."""
    return layout == 'workshop'
@look_for_fixme
def check_carpentry(layout):
    """"carpentry" in the YAML header must be one of CARPENTRIES."""
    return layout in CARPENTRIES
@look_for_fixme
def check_country(country):
    """"country" must be a lowercase ISO-3166 two-letter code."""
    return country in ISO_COUNTRY
@look_for_fixme
def check_language(language):
    """"language" must be a lowercase ISO-639 two-letter code."""
    return language in ISO_LANGUAGE
@look_for_fixme
def check_humandate(date):
    """'humandate' must have a three-letter month and a four-digit year,
    e.g. 'Feb 18-20, 2025'.

    NOTE(review): values with more than one comma raise ValueError at
    the unpacking below, and month parts shorter than four characters
    raise IndexError, instead of returning False.
    """
    if ',' not in date:
        return False
    month_dates, year = date.split(',')
    # The month abbreviation (first three characters) must contain no spaces.
    month = month_dates[:3]
    if any(char == ' ' for char in month):
        return False
    # The fourth character must be a space ("February" is illegal).
    if month_dates[3] != ' ':
        return False
    # The year part must parse as an integer.
    try:
        int(year)
    except:
        return False
    return True
@look_for_fixme
def check_humantime(time):
    """'humantime' must match HUMANTIME_PATTERN once spaces are removed,
    e.g. '09:00 - 16:00'."""
    return bool(re.match(HUMANTIME_PATTERN, time.replace(' ', '')))
def check_date(this_date):
    """'startdate'/'enddate' must be machine-readable dates; YAML loads
    valid YYYY-MM-DD values as datetime.date objects."""
    return isinstance(this_date, date)
@look_for_fixme
def check_latitude_longitude(latlng):
    """'latlng' must be two comma-separated floating-point numbers in
    the ranges [-90, 90] and [-180, 180] respectively."""
    try:
        lat, lng = latlng.split(',')
        lat = float(lat)
        long = float(lng)
        return (-90.0 <= lat <= 90.0) and (-180.0 <= long <= 180.0)
    except ValueError:
        # Wrong number of commas or non-numeric parts.
        return False
def check_instructors(instructors):
    """'instructor' must be a non-empty list (YAML loads bracketed name
    lists as Python lists)."""
    return isinstance(instructors, list) and len(instructors) > 0
def check_helpers(helpers):
    """'helper' must be a list; it may be empty.  len() >= 0 is always
    true, so only the type check matters."""
    return isinstance(helpers, list) and len(helpers) >= 0
@look_for_fixme
def check_emails(emails):
    """'contact' must be a list of addresses matching EMAIL_PATTERN and
    must not contain DEFAULT_CONTACT_EMAIL."""
    if (isinstance(emails, list) and len(emails) >= 0):
        for email in emails:
            if ((not bool(re.match(EMAIL_PATTERN, email))) or (email == DEFAULT_CONTACT_EMAIL)):
                return False
    else:
        return False
    return True
def check_eventbrite(eventbrite):
    """'eventbrite' must be an int or a string matching EVENTBRITE_PATTERN."""
    if isinstance(eventbrite, int):
        return True
    else:
        return bool(re.match(EVENTBRITE_PATTERN, eventbrite))
@look_for_fixme
def check_collaborative_notes(collaborative_notes):
    """'collaborative_notes' must match URL_PATTERN."""
    return bool(re.match(URL_PATTERN, collaborative_notes))
@look_for_fixme
def check_pass(value):
    """Unconditional pass, for keys with no feasible validation."""
    return True
# Map each YAML header key to a (required, validator, error message) triple.
# FIX: the 'speaker' entry referenced the undefined name 'check_helper';
# the validator defined above is called 'check_helpers', so the old code
# raised NameError as soon as this module was imported.
HANDLERS = {
    'layout': (True, check_layout, 'layout isn\'t "workshop"'),
    'carpentry': (True, check_carpentry, 'carpentry isn\'t in ' +
                  ', '.join(CARPENTRIES)),
    'country': (True, check_country,
                'country invalid: must use lowercase two-letter ISO code ' +
                'from ' + ', '.join(ISO_COUNTRY)),
    'language': (False, check_language,
                 'language invalid: must use lowercase two-letter ISO code' +
                 ' from ' + ', '.join(ISO_LANGUAGE)),
    'humandate': (True, check_humandate,
                  'humandate invalid. Please use three-letter months like ' +
                  '"Jan" and four-letter years like "2025"'),
    'humantime': (True, check_humantime,
                  'humantime doesn\'t include numbers'),
    'startdate': (True, check_date,
                  'startdate invalid. Must be of format year-month-day, ' +
                  'i.e., 2014-01-31'),
    'enddate': (False, check_date,
                'enddate invalid. Must be of format year-month-day, i.e.,' +
                ' 2014-01-31'),
    'latlng': (True, check_latitude_longitude,
               'latlng invalid. Check that it is two floating point ' +
               'numbers, separated by a comma'),
    'organizer': (True, check_instructors,
                  'instructor list isn\'t a valid list of format ' +
                  '["First instructor", "Second instructor",..]'),
    'speaker': (True, check_helpers,
                'helper list isn\'t a valid list of format ' +
                '["First helper", "Second helper",..]'),
    'contact': (True, check_emails,
                'contact email list isn\'t a valid list of format ' +
                '["me@example.org", "you@example.org",..] or contains incorrectly formatted email addresses or ' +
                '"{0}".'.format(DEFAULT_CONTACT_EMAIL)),
    'eventbrite': (False, check_eventbrite, 'Eventbrite key appears invalid'),
    'collaborative_notes': (False, check_collaborative_notes, 'Collaborative Notes URL appears invalid'),
    'venue': (False, check_pass, 'venue name not specified'),
    'address': (False, check_pass, 'address not specified')
}
# REQUIRED / OPTIONAL partition the header keys by the "required" flag
# stored as the first element of each HANDLERS entry.
REQUIRED = set([k for k in HANDLERS if HANDLERS[k][0]])
OPTIONAL = set([k for k in HANDLERS if not HANDLERS[k][0]])
def check_blank_lines(reporter, raw):
    """Report any blank lines found inside the raw YAML header text."""
    lines = [(i, x) for (i, x) in enumerate(raw.strip().split('\n')) if not x.strip()]
    reporter.check(not lines,
                   None,
                   'Blank line(s) in header: {0}',
                   ', '.join(["{0}: {1}".format(i, x.rstrip()) for (i, x) in lines]))
def check_categories(reporter, left, right, msg):
    """Report entries of ``left`` that are missing from ``right``."""
    diff = left - right
    reporter.check(len(diff) == 0,
                   None,
                   '{0}: offending entries {1}',
                   msg, sorted(list(diff)))
def check_file(reporter, path, data):
    """Validate one page: split out its YAML header and check every entry."""
    raw, header, body = split_metadata(path, data)
    # Blank lines are not allowed inside the header.
    check_blank_lines(reporter, raw)
    # A category present in the header is checked when it is required or
    # carries an actual value; a category that is absent from the header
    # but is required is reported as an error.
    for category in HANDLERS:
        required, handler, message = HANDLERS[category]
        if category in header:
            if required or header[category]:
                reporter.check(handler(header[category]),
                               None,
                               '{0}\n actual value "{1}"',
                               message, header[category])
        elif required:
            reporter.add(None,
                         'Missing mandatory key "{0}"',
                         category)
    # Check whether we have missing or too many categories
    seen_categories = set(header.keys())
    check_categories(reporter, REQUIRED, seen_categories,
                     'Missing categories')
    check_categories(reporter, seen_categories, REQUIRED.union(OPTIONAL),
                     'Superfluous categories')
def check_config(reporter, filename):
    """Check the YAML configuration file: 'kind' must be 'workshop' and
    'carpentry' must be 'swc' or 'dc'."""
    config = load_yaml(filename)
    kind = config.get('kind', None)
    reporter.check(kind == 'workshop',
                   filename,
                   'Missing or unknown kind of event: {0}',
                   kind)
    carpentry = config.get('carpentry', None)
    reporter.check(carpentry in ('swc', 'dc'),
                   filename,
                   'Missing or unknown carpentry: {0}',
                   carpentry)
def main():
    """Run as the main program: validate the workshop website rooted at
    sys.argv[1] (its _config.yml, directory contents, and index.html)."""
    if len(sys.argv) != 2:
        print(USAGE, file=sys.stderr)
        sys.exit(1)
    root_dir = sys.argv[1]
    index_file = os.path.join(root_dir, 'index.html')
    config_file = os.path.join(root_dir, '_config.yml')
    reporter = Reporter()
    check_config(reporter, config_file)
    check_unwanted_files(root_dir, reporter)
    with open(index_file) as reader:
        data = reader.read()
    check_file(reporter, index_file, data)
    reporter.report()
if __name__ == '__main__':
    main()
| true | true |
f73b3026a7a9b6177f3c548ce4ad20c13608b6b1 | 19,450 | py | Python | aredis/pool.py | eoghanmurray/aredis | e0ddfea1c6e21219aca9f67b10160bc380540fbf | [
"MIT"
] | null | null | null | aredis/pool.py | eoghanmurray/aredis | e0ddfea1c6e21219aca9f67b10160bc380540fbf | [
"MIT"
] | null | null | null | aredis/pool.py | eoghanmurray/aredis | e0ddfea1c6e21219aca9f67b10160bc380540fbf | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import asyncio
import os
import warnings
import time
import random
import threading
from itertools import chain
from urllib.parse import (parse_qs,
unquote,
urlparse)
from aredis.connection import (RedisSSLContext,
Connection,
UnixDomainSocketConnection,
ClusterConnection)
from aredis.nodemanager import NodeManager
from aredis.exceptions import (ConnectionError,
RedisClusterException)
# Uppercase spellings that URL query arguments may use to mean "false".
FALSE_STRINGS = ('0', 'F', 'FALSE', 'N', 'NO')
def to_bool(value):
    """Convert a URL query argument to a boolean.

    Returns None for missing/empty values, False for recognized "false"
    spellings (case-insensitive), and ``bool(value)`` otherwise.
    """
    if value is None or value == '':
        return None
    is_false_word = isinstance(value, str) and value.upper() in FALSE_STRINGS
    return False if is_false_word else bool(value)
URL_QUERY_ARGUMENT_PARSERS = {
'stream_timeout': float,
'connect_timeout': float,
'retry_on_timeout': to_bool
}
class ConnectionPool(object):
    """Generic connection pool.

    Connections are created lazily up to ``max_connections`` and recycled
    through :meth:`get_connection` / :meth:`release`.  When
    ``max_idle_time`` and ``idle_check_interval`` are both positive, a
    background task reaps connections that have been idle too long.
    """
    @classmethod
    def from_url(cls, url, db=None, decode_components=False, **kwargs):
        """
        Returns a connection pool configured from the given URL.
        For example::
            redis://[:password]@localhost:6379/0
            rediss://[:password]@localhost:6379/0
            unix://[:password]@/path/to/socket.sock?db=0
        Three URL schemes are supported:
        - ```redis://``
          <http://www.iana.org/assignments/uri-schemes/prov/redis>`_ creates a
          normal TCP socket connection
        - ```rediss://``
          <http://www.iana.org/assignments/uri-schemes/prov/rediss>`_ creates a
          SSL wrapped TCP socket connection
        - ``unix://`` creates a Unix Domain Socket connection
        There are several ways to specify a database number. The parse function
        will return the first specified option:
        1. A ``db`` querystring option, e.g. redis://localhost?db=0
        2. If using the redis:// scheme, the path argument of the url, e.g.
        redis://localhost/0
        3. The ``db`` argument to this function.
        If none of these options are specified, db=0 is used.
        The ``decode_components`` argument allows this function to work with
        percent-encoded URLs. If this argument is set to ``True`` all ``%xx``
        escapes will be replaced by their single-character equivalents after
        the URL has been parsed. This only applies to the ``hostname``,
        ``path``, and ``password`` components.
        Any additional querystring arguments and keyword arguments will be
        passed along to the ConnectionPool class's initializer. The querystring
        arguments ``connect_timeout`` and ``stream_timeout`` if supplied
        are parsed as float values. The arguments ``retry_on_timeout`` are
        parsed to boolean values that accept True/False, Yes/No values to indicate state.
        Invalid types cause a ``UserWarning`` to be raised.
        In the case of conflicting arguments, querystring arguments always win.
        """
        url = urlparse(url)
        qs = url.query
        url_options = {}
        # Parse known querystring options with their dedicated parsers;
        # unknown options are passed through verbatim.
        for name, value in iter(parse_qs(qs).items()):
            if value and len(value) > 0:
                parser = URL_QUERY_ARGUMENT_PARSERS.get(name)
                if parser:
                    try:
                        url_options[name] = parser(value[0])
                    except (TypeError, ValueError):
                        warnings.warn(UserWarning(
                            "Invalid value for `%s` in connection URL." % name
                        ))
                else:
                    url_options[name] = value[0]
        if decode_components:
            password = unquote(url.password) if url.password else None
            path = unquote(url.path) if url.path else None
            hostname = unquote(url.hostname) if url.hostname else None
        else:
            password = url.password
            path = url.path
            hostname = url.hostname
        # We only support redis:// (and rediss://) and unix:// schemes.
        if url.scheme == 'unix':
            url_options.update({
                'password': password,
                'path': path,
                'connection_class': UnixDomainSocketConnection,
            })
        else:
            url_options.update({
                'host': hostname,
                'port': int(url.port or 6379),
                'password': password,
            })
            # If there's a path argument, use it as the db argument if a
            # querystring value wasn't specified
            if 'db' not in url_options and path:
                try:
                    url_options['db'] = int(path.replace('/', ''))
                except (AttributeError, ValueError):
                    pass
            if url.scheme == 'rediss':
                keyfile = url_options.pop('ssl_keyfile', None)
                certfile = url_options.pop('ssl_certfile', None)
                cert_reqs = url_options.pop('ssl_cert_reqs', None)
                ca_certs = url_options.pop('ssl_ca_certs', None)
                url_options['ssl_context'] = RedisSSLContext(keyfile, certfile, cert_reqs, ca_certs).get()
        # last shot at the db value
        url_options['db'] = int(url_options.get('db', db or 0))
        # update the arguments from the URL values
        kwargs.update(url_options)
        return cls(**kwargs)
    def __init__(self, connection_class=Connection, max_connections=None,
                 max_idle_time=0, idle_check_interval=1,
                 **connection_kwargs):
        """
        Creates a connection pool. If max_connections is set, then this
        object raises redis.ConnectionError when the pool's limit is reached.
        By default, TCP connections are created connection_class is specified.
        Use redis.UnixDomainSocketConnection for unix sockets.
        Any additional keyword arguments are passed to the constructor of
        connection_class.
        """
        max_connections = max_connections or 2 ** 31
        if not isinstance(max_connections, int) or max_connections < 0:
            raise ValueError('"max_connections" must be a positive integer')
        self.connection_class = connection_class
        self.connection_kwargs = connection_kwargs
        self.max_connections = max_connections
        self.max_idle_time = max_idle_time
        self.idle_check_interval = idle_check_interval
        self.loop = self.connection_kwargs.get('loop')
        self.reset()
    def __repr__(self):
        return '{}<{}>'.format(
            type(self).__name__,
            self.connection_class.description.format(**self.connection_kwargs),
        )
    async def disconnect_on_idle_time_exceeded(self, connection):
        """Background task: close *connection* once it has been idle for
        longer than ``max_idle_time`` and is not awaiting a response."""
        while True:
            if (time.time() - connection.last_active_at > self.max_idle_time
                    and not connection.awaiting_response):
                connection.disconnect()
                try:
                    self._available_connections.remove(connection)
                except ValueError:
                    pass
                self._created_connections -= 1
                break
            await asyncio.sleep(self.idle_check_interval)
    def reset(self):
        """Resets the pool's bookkeeping to a clean (empty) state."""
        self.pid = os.getpid()
        self._created_connections = 0
        self._available_connections = []
        self._in_use_connections = set()
        self._check_lock = threading.Lock()
    def _checkpid(self):
        # If the process was forked, throw away the parent's connections
        # and start over with a fresh pool for this pid.
        if self.pid != os.getpid():
            with self._check_lock:
                if self.pid == os.getpid():
                    # another thread already did the work while we waited
                    # on the lock.
                    return
                self.disconnect()
                self.reset()
    def get_connection(self, *args, **kwargs):
        """Gets a connection from the pool"""
        self._checkpid()
        try:
            connection = self._available_connections.pop()
        except IndexError:
            connection = self.make_connection()
        self._in_use_connections.add(connection)
        return connection
    def make_connection(self):
        """Creates a new connection"""
        if self._created_connections >= self.max_connections:
            raise ConnectionError("Too many connections")
        self._created_connections += 1
        connection = self.connection_class(**self.connection_kwargs)
        if self.max_idle_time > self.idle_check_interval > 0:
            # do not await the future
            asyncio.ensure_future(self.disconnect_on_idle_time_exceeded(connection))
        return connection
    def release(self, connection):
        """Releases the connection back to the pool"""
        self._checkpid()
        if connection.pid != self.pid:
            return
        # Use discard() rather than remove(): if the pool was reset (for
        # example by _checkpid() in another thread) while this connection
        # was checked out, it is no longer tracked in _in_use_connections
        # and remove() would raise KeyError.  This mirrors the membership
        # guard already used by ClusterConnectionPool.release().
        self._in_use_connections.discard(connection)
        # discard connection with unread response
        if connection.awaiting_response:
            connection.disconnect()
            self._created_connections -= 1
        else:
            self._available_connections.append(connection)
    def disconnect(self):
        """Closes all connections in the pool"""
        all_conns = chain(self._available_connections,
                          self._in_use_connections)
        for connection in all_conns:
            connection.disconnect()
            self._created_connections -= 1
class ClusterConnectionPool(ConnectionPool):
    """Custom connection pool for rediscluster: connections are tracked
    per node, keyed by the node's ``name`` (host:port)."""
    # Default stream timeout applied when the caller supplies none.
    RedisClusterDefaultTimeout = None
    def __init__(self, startup_nodes=None, connection_class=ClusterConnection,
                 max_connections=None, max_connections_per_node=False, reinitialize_steps=None,
                 skip_full_coverage_check=False, nodemanager_follow_cluster=False, readonly=False,
                 max_idle_time=0, idle_check_interval=1,
                 **connection_kwargs):
        """
        :skip_full_coverage_check:
            Skips the check of cluster-require-full-coverage config, useful for clusters
            without the CONFIG command (like aws)
        :nodemanager_follow_cluster:
            The node manager will during initialization try the last set of nodes that
            it was operating on. This will allow the client to drift along side the cluster
            if the cluster nodes move around alot.
        """
        super(ClusterConnectionPool, self).__init__(connection_class=connection_class, max_connections=max_connections)
        # Special case to make from_url method compliant with cluster setting.
        # from_url method will send in the ip and port through a different variable then the
        # regular startup_nodes variable.
        if startup_nodes is None:
            if 'port' in connection_kwargs and 'host' in connection_kwargs:
                startup_nodes = [{
                    'host': connection_kwargs.pop('host'),
                    'port': str(connection_kwargs.pop('port')),
                }]
        self.max_connections = max_connections or 2 ** 31
        self.max_connections_per_node = max_connections_per_node
        self.nodes = NodeManager(
            startup_nodes,
            reinitialize_steps=reinitialize_steps,
            skip_full_coverage_check=skip_full_coverage_check,
            max_connections=self.max_connections,
            nodemanager_follow_cluster=nodemanager_follow_cluster,
            **connection_kwargs
        )
        self.initialized = False
        self.connections = {}
        self.connection_kwargs = connection_kwargs
        self.connection_kwargs['readonly'] = readonly
        self.readonly = readonly
        self.max_idle_time = max_idle_time
        self.idle_check_interval = idle_check_interval
        self.reset()
        if "stream_timeout" not in self.connection_kwargs:
            self.connection_kwargs["stream_timeout"] = ClusterConnectionPool.RedisClusterDefaultTimeout
    def __repr__(self):
        """
        Returns a string with all unique ip:port combinations that this pool
        is connected to
        """
        return "{0}<{1}>".format(
            type(self).__name__,
            ", ".join([self.connection_class.description.format(**node)
                       for node in self.nodes.startup_nodes])
        )
    async def initialize(self):
        """Discover the cluster topology once; subsequent calls are no-ops."""
        if not self.initialized:
            await self.nodes.initialize()
            self.initialized = True
    async def disconnect_on_idle_time_exceeded(self, connection):
        """Background task: close *connection* once it has been idle for
        longer than ``max_idle_time`` and is not awaiting a response."""
        while True:
            if (time.time() - connection.last_active_at > self.max_idle_time
                    and not connection.awaiting_response):
                connection.disconnect()
                node = connection.node
                self._available_connections[node['name']].remove(connection)
                self._created_connections_per_node[node['name']] -= 1
                break
            await asyncio.sleep(self.idle_check_interval)
    def reset(self):
        """Resets the connection pool back to a clean state"""
        self.pid = os.getpid()
        self._created_connections_per_node = {}  # Dict(Node, Int)
        self._available_connections = {}  # Dict(Node, List)
        self._in_use_connections = {}  # Dict(Node, Set)
        self._check_lock = threading.Lock()
        self.initialized = False
    def _checkpid(self):
        # If the process was forked, drop the parent's connections and
        # rebuild the pool for this pid.
        if self.pid != os.getpid():
            with self._check_lock:
                if self.pid == os.getpid():
                    # another thread already did the work while we waited
                    # on the lock.
                    return
                self.disconnect()
                self.reset()
    def get_connection(self, command_name, *keys, **options):
        """Get a connection for a pubsub command, routed by channel slot."""
        # Only pubsub command/connection should be allowed here
        if command_name != "pubsub":
            raise RedisClusterException("Only 'pubsub' commands can use get_connection()")
        channel = options.pop('channel', None)
        if not channel:
            return self.get_random_connection()
        slot = self.nodes.keyslot(channel)
        node = self.get_master_node_by_slot(slot)
        self._checkpid()
        try:
            connection = self._available_connections.get(node["name"], []).pop()
        except IndexError:
            connection = self.make_connection(node)
        if node['name'] not in self._in_use_connections:
            self._in_use_connections[node['name']] = set()
        self._in_use_connections[node['name']].add(connection)
        return connection
    def make_connection(self, node):
        """Creates a new connection"""
        if self.count_all_num_connections(node) >= self.max_connections:
            if self.max_connections_per_node:
                raise RedisClusterException("Too many connection ({0}) for node: {1}"
                                            .format(self.count_all_num_connections(node),
                                                    node['name']))
            raise RedisClusterException("Too many connections")
        self._created_connections_per_node.setdefault(node['name'], 0)
        self._created_connections_per_node[node['name']] += 1
        connection = self.connection_class(host=node["host"],
                                           port=node["port"],
                                           **self.connection_kwargs)
        # Must store node in the connection to make it easier to track
        connection.node = node
        if self.max_idle_time > self.idle_check_interval > 0:
            # do not await the future
            asyncio.ensure_future(self.disconnect_on_idle_time_exceeded(connection))
        return connection
    def release(self, connection):
        """Releases the connection back to the pool"""
        self._checkpid()
        if connection.pid != self.pid:
            return
        # Remove the connection from the node's in-use set and hand it
        # back to the available pool.  The membership check makes this
        # safe when the connection is no longer tracked (e.g. the pool
        # was reset while it was checked out).
        i_c = self._in_use_connections.get(connection.node["name"], set())
        if connection in i_c:
            i_c.remove(connection)
        else:
            pass
        # discard connection with unread response
        if connection.awaiting_response:
            connection.disconnect()
            # reduce node connection count in case of too many connection error raised
            if self.max_connections_per_node and self._created_connections_per_node.get(connection.node['name']):
                self._created_connections_per_node[connection.node['name']] -= 1
        else:
            self._available_connections.setdefault(connection.node["name"], []).append(connection)
    def disconnect(self):
        """Closes all connections in the pool"""
        all_conns = chain(
            self._available_connections.values(),
            self._in_use_connections.values(),
        )
        for node_connections in all_conns:
            for connection in node_connections:
                connection.disconnect()
    def count_all_num_connections(self, node):
        """Count connections for *node* only (when per-node limits are on)
        or for the whole pool otherwise."""
        if self.max_connections_per_node:
            return self._created_connections_per_node.get(node['name'], 0)
        return sum([i for i in self._created_connections_per_node.values()])
    def get_random_connection(self):
        """Opens new connection to random redis server"""
        if self._available_connections:
            node_name = random.choice(list(self._available_connections.keys()))
            conn_list = self._available_connections[node_name]
            # check it in case of empty connection list
            if conn_list:
                return conn_list.pop()
        for node in self.nodes.random_startup_node_iter():
            connection = self.get_connection_by_node(node)
            if connection:
                return connection
        raise Exception("Cant reach a single startup node.")
    def get_connection_by_key(self, key):
        """Get a connection to the node owning *key*'s hash slot."""
        if not key:
            raise RedisClusterException("No way to dispatch this command to Redis Cluster.")
        return self.get_connection_by_slot(self.nodes.keyslot(key))
    def get_connection_by_slot(self, slot):
        """
        Determines what server a specific slot belongs to and return a redis
        object that is connected
        """
        self._checkpid()
        try:
            return self.get_connection_by_node(self.get_node_by_slot(slot))
        except KeyError:
            # Slot table not populated yet; fall back to any reachable node.
            return self.get_random_connection()
    def get_connection_by_node(self, node):
        """Gets a connection by node"""
        self._checkpid()
        self.nodes.set_node_name(node)
        try:
            # Try to get connection from existing pool
            connection = self._available_connections.get(node["name"], []).pop()
        except IndexError:
            connection = self.make_connection(node)
        self._in_use_connections.setdefault(node["name"], set()).add(connection)
        return connection
    def get_master_node_by_slot(self, slot):
        # The master is the first entry in the slot's node list.
        return self.nodes.slots[slot][0]
    def get_node_by_slot(self, slot):
        # In readonly mode any replica of the slot may serve the request.
        if self.readonly:
            return random.choice(self.nodes.slots[slot])
        return self.get_master_node_by_slot(slot)
| 38.822355 | 119 | 0.611774 |
import asyncio
import os
import warnings
import time
import random
import threading
from itertools import chain
from urllib.parse import (parse_qs,
unquote,
urlparse)
from aredis.connection import (RedisSSLContext,
Connection,
UnixDomainSocketConnection,
ClusterConnection)
from aredis.nodemanager import NodeManager
from aredis.exceptions import (ConnectionError,
RedisClusterException)
# Uppercase spellings that URL query arguments may use to mean "false".
FALSE_STRINGS = ('0', 'F', 'FALSE', 'N', 'NO')
def to_bool(value):
    """Convert a URL query argument to a boolean: None for missing/empty
    values, False for recognized "false" spellings, bool(value) otherwise."""
    if value is None or value == '':
        return None
    if isinstance(value, str) and value.upper() in FALSE_STRINGS:
        return False
    return bool(value)
URL_QUERY_ARGUMENT_PARSERS = {
'stream_timeout': float,
'connect_timeout': float,
'retry_on_timeout': to_bool
}
class ConnectionPool(object):
@classmethod
def from_url(cls, url, db=None, decode_components=False, **kwargs):
url = urlparse(url)
qs = url.query
url_options = {}
for name, value in iter(parse_qs(qs).items()):
if value and len(value) > 0:
parser = URL_QUERY_ARGUMENT_PARSERS.get(name)
if parser:
try:
url_options[name] = parser(value[0])
except (TypeError, ValueError):
warnings.warn(UserWarning(
"Invalid value for `%s` in connection URL." % name
))
else:
url_options[name] = value[0]
if decode_components:
password = unquote(url.password) if url.password else None
path = unquote(url.path) if url.path else None
hostname = unquote(url.hostname) if url.hostname else None
else:
password = url.password
path = url.path
hostname = url.hostname
if url.scheme == 'unix':
url_options.update({
'password': password,
'path': path,
'connection_class': UnixDomainSocketConnection,
})
else:
url_options.update({
'host': hostname,
'port': int(url.port or 6379),
'password': password,
})
# querystring value wasn't specified
if 'db' not in url_options and path:
try:
url_options['db'] = int(path.replace('/', ''))
except (AttributeError, ValueError):
pass
if url.scheme == 'rediss':
keyfile = url_options.pop('ssl_keyfile', None)
certfile = url_options.pop('ssl_certfile', None)
cert_reqs = url_options.pop('ssl_cert_reqs', None)
ca_certs = url_options.pop('ssl_ca_certs', None)
url_options['ssl_context'] = RedisSSLContext(keyfile, certfile, cert_reqs, ca_certs).get()
url_options['db'] = int(url_options.get('db', db or 0))
kwargs.update(url_options)
return cls(**kwargs)
def __init__(self, connection_class=Connection, max_connections=None,
max_idle_time=0, idle_check_interval=1,
**connection_kwargs):
max_connections = max_connections or 2 ** 31
if not isinstance(max_connections, int) or max_connections < 0:
raise ValueError('"max_connections" must be a positive integer')
self.connection_class = connection_class
self.connection_kwargs = connection_kwargs
self.max_connections = max_connections
self.max_idle_time = max_idle_time
self.idle_check_interval = idle_check_interval
self.loop = self.connection_kwargs.get('loop')
self.reset()
def __repr__(self):
return '{}<{}>'.format(
type(self).__name__,
self.connection_class.description.format(**self.connection_kwargs),
)
async def disconnect_on_idle_time_exceeded(self, connection):
while True:
if (time.time() - connection.last_active_at > self.max_idle_time
and not connection.awaiting_response):
connection.disconnect()
try:
self._available_connections.remove(connection)
except ValueError:
pass
self._created_connections -= 1
break
await asyncio.sleep(self.idle_check_interval)
def reset(self):
self.pid = os.getpid()
self._created_connections = 0
self._available_connections = []
self._in_use_connections = set()
self._check_lock = threading.Lock()
def _checkpid(self):
if self.pid != os.getpid():
with self._check_lock:
if self.pid == os.getpid():
return
self.disconnect()
self.reset()
def get_connection(self, *args, **kwargs):
self._checkpid()
try:
connection = self._available_connections.pop()
except IndexError:
connection = self.make_connection()
self._in_use_connections.add(connection)
return connection
def make_connection(self):
if self._created_connections >= self.max_connections:
raise ConnectionError("Too many connections")
self._created_connections += 1
connection = self.connection_class(**self.connection_kwargs)
if self.max_idle_time > self.idle_check_interval > 0:
asyncio.ensure_future(self.disconnect_on_idle_time_exceeded(connection))
return connection
def release(self, connection):
self._checkpid()
if connection.pid != self.pid:
return
self._in_use_connections.remove(connection)
if connection.awaiting_response:
connection.disconnect()
self._created_connections -= 1
else:
self._available_connections.append(connection)
def disconnect(self):
all_conns = chain(self._available_connections,
self._in_use_connections)
for connection in all_conns:
connection.disconnect()
self._created_connections -= 1
class ClusterConnectionPool(ConnectionPool):
RedisClusterDefaultTimeout = None
def __init__(self, startup_nodes=None, connection_class=ClusterConnection,
max_connections=None, max_connections_per_node=False, reinitialize_steps=None,
skip_full_coverage_check=False, nodemanager_follow_cluster=False, readonly=False,
max_idle_time=0, idle_check_interval=1,
**connection_kwargs):
super(ClusterConnectionPool, self).__init__(connection_class=connection_class, max_connections=max_connections)
if startup_nodes is None:
if 'port' in connection_kwargs and 'host' in connection_kwargs:
startup_nodes = [{
'host': connection_kwargs.pop('host'),
'port': str(connection_kwargs.pop('port')),
}]
self.max_connections = max_connections or 2 ** 31
self.max_connections_per_node = max_connections_per_node
self.nodes = NodeManager(
startup_nodes,
reinitialize_steps=reinitialize_steps,
skip_full_coverage_check=skip_full_coverage_check,
max_connections=self.max_connections,
nodemanager_follow_cluster=nodemanager_follow_cluster,
**connection_kwargs
)
self.initialized = False
self.connections = {}
self.connection_kwargs = connection_kwargs
self.connection_kwargs['readonly'] = readonly
self.readonly = readonly
self.max_idle_time = max_idle_time
self.idle_check_interval = idle_check_interval
self.reset()
if "stream_timeout" not in self.connection_kwargs:
self.connection_kwargs["stream_timeout"] = ClusterConnectionPool.RedisClusterDefaultTimeout
def __repr__(self):
return "{0}<{1}>".format(
type(self).__name__,
", ".join([self.connection_class.description.format(**node)
for node in self.nodes.startup_nodes])
)
async def initialize(self):
if not self.initialized:
await self.nodes.initialize()
self.initialized = True
async def disconnect_on_idle_time_exceeded(self, connection):
while True:
if (time.time() - connection.last_active_at > self.max_idle_time
and not connection.awaiting_response):
connection.disconnect()
node = connection.node
self._available_connections[node['name']].remove(connection)
self._created_connections_per_node[node['name']] -= 1
break
await asyncio.sleep(self.idle_check_interval)
def reset(self):
self.pid = os.getpid()
self._created_connections_per_node = {}
self._available_connections = {}
self._in_use_connections = {}
self._check_lock = threading.Lock()
self.initialized = False
def _checkpid(self):
if self.pid != os.getpid():
with self._check_lock:
if self.pid == os.getpid():
return
self.disconnect()
self.reset()
def get_connection(self, command_name, *keys, **options):
if command_name != "pubsub":
raise RedisClusterException("Only 'pubsub' commands can use get_connection()")
channel = options.pop('channel', None)
if not channel:
return self.get_random_connection()
slot = self.nodes.keyslot(channel)
node = self.get_master_node_by_slot(slot)
self._checkpid()
try:
connection = self._available_connections.get(node["name"], []).pop()
except IndexError:
connection = self.make_connection(node)
if node['name'] not in self._in_use_connections:
self._in_use_connections[node['name']] = set()
self._in_use_connections[node['name']].add(connection)
return connection
def make_connection(self, node):
if self.count_all_num_connections(node) >= self.max_connections:
if self.max_connections_per_node:
raise RedisClusterException("Too many connection ({0}) for node: {1}"
.format(self.count_all_num_connections(node),
node['name']))
raise RedisClusterException("Too many connections")
self._created_connections_per_node.setdefault(node['name'], 0)
self._created_connections_per_node[node['name']] += 1
connection = self.connection_class(host=node["host"],
port=node["port"],
**self.connection_kwargs)
connection.node = node
if self.max_idle_time > self.idle_check_interval > 0:
asyncio.ensure_future(self.disconnect_on_idle_time_exceeded(connection))
return connection
def release(self, connection):
self._checkpid()
if connection.pid != self.pid:
return
i_c = self._in_use_connections.get(connection.node["name"], set())
if connection in i_c:
i_c.remove(connection)
else:
pass
if connection.awaiting_response:
connection.disconnect()
if self.max_connections_per_node and self._created_connections_per_node.get(connection.node['name']):
self._created_connections_per_node[connection.node['name']] -= 1
else:
self._available_connections.setdefault(connection.node["name"], []).append(connection)
def disconnect(self):
all_conns = chain(
self._available_connections.values(),
self._in_use_connections.values(),
)
for node_connections in all_conns:
for connection in node_connections:
connection.disconnect()
def count_all_num_connections(self, node):
if self.max_connections_per_node:
return self._created_connections_per_node.get(node['name'], 0)
return sum([i for i in self._created_connections_per_node.values()])
def get_random_connection(self):
if self._available_connections:
node_name = random.choice(list(self._available_connections.keys()))
conn_list = self._available_connections[node_name]
if conn_list:
return conn_list.pop()
for node in self.nodes.random_startup_node_iter():
connection = self.get_connection_by_node(node)
if connection:
return connection
raise Exception("Cant reach a single startup node.")
def get_connection_by_key(self, key):
if not key:
raise RedisClusterException("No way to dispatch this command to Redis Cluster.")
return self.get_connection_by_slot(self.nodes.keyslot(key))
def get_connection_by_slot(self, slot):
self._checkpid()
try:
return self.get_connection_by_node(self.get_node_by_slot(slot))
except KeyError:
return self.get_random_connection()
def get_connection_by_node(self, node):
self._checkpid()
self.nodes.set_node_name(node)
try:
connection = self._available_connections.get(node["name"], []).pop()
except IndexError:
connection = self.make_connection(node)
self._in_use_connections.setdefault(node["name"], set()).add(connection)
return connection
def get_master_node_by_slot(self, slot):
return self.nodes.slots[slot][0]
def get_node_by_slot(self, slot):
if self.readonly:
return random.choice(self.nodes.slots[slot])
return self.get_master_node_by_slot(slot)
| true | true |
f73b31234e443bebd1fd34ce1f18d098d1497847 | 4,100 | py | Python | apps/utils/CodeConstant.py | Chise1/my_audit_monitor | e302c339be4083cc03349096142bcff85b6947e5 | [
"BSD-3-Clause"
] | null | null | null | apps/utils/CodeConstant.py | Chise1/my_audit_monitor | e302c339be4083cc03349096142bcff85b6947e5 | [
"BSD-3-Clause"
] | null | null | null | apps/utils/CodeConstant.py | Chise1/my_audit_monitor | e302c339be4083cc03349096142bcff85b6947e5 | [
"BSD-3-Clause"
] | null | null | null | class CodeConstant():
SYSTEM_ERROR = "系统错误,请与管理员联系"
REQUEST_FAILUE = "请求失败"
FILE_UPLOAD_FAILUE = "文件上传失败"
FILE_DELETE_FAILUE = "文件删除失败"
CODE_000 = "000" # 接口提交成功
CODE_001 = "001" # 接口非法请求错误
CODE_002 = "002" # 接口传递参数错误
CODE_003 = "003" # 接口异常
# 接口返回码信息 ** /
REQUEST_SUCCESS = "000" # 请求成功 *
REQUEST_FAIL = "001" # 请求失败
COMMON_ILIEGAL_REQUEST = "002" # 参数信息不合法
COMMON_NON_MAIN_ACCOUNT_REQUEST = "003" # 非主帐号
COMMON_INTERFACE_ERROR = "999" # 接口异常 *
# 商户类返回码 以1开头标识 ** /
REGISTER_ISHAVE = "10001" # 用户名已存在
RREGISTER_ISNULL = "10002" # 用户名为空
REGISTER_PWD_ISNULL = "10003" # 密码为空
RREGISTER_CODE_ISNULL = "10004" # 验证码为空
RREGISTER_CODE_ERROR = "10005" # 验证码错误
LOGIN_ACCOUNT_ISNULL = "10006" # 账户或密码为空
LOGIN_ACCOUNT_ISNOTHAVE = "10007" # 账户不存在
LOGIN_ACCOUNT_ERROR = "10008" # 账户或密码错误
SENDMESSAGE_MOBILE_ISNULL = "10009" # 手机号码为空 *
MOBILE_ISHAVE = "10010" # 手机号码已注册 *
COMPANY_BASIC_ISNOTHAVE = "10011" # 未找到用户信息
STORE_ID_ISNULL = "10012" # 店铺ID为空
FINDPWD_PWD_NOT_AS = "10013" # 两次密码输入不一致
COMPANY_MOBILE_BINGING = "10014" # 手机号码已被绑定
COMPANY_EMAIL_BINGING = "10015" # 邮箱已被绑定
COMPANY_MOBILE_NOTREGISTER = "10016" # 手机号码在系统中不存在
COMPANY_CODE_ISNULL = "10017" # 图形验证码为空
CODE_PIC_ERR = "10018" # 图形验证码为空
COMPANY_EMAIL_ISNULL = "10019" # 邮箱为空
COMPANY_MODPWD_ISLOSE = "10020" # 修改密码地址已失效
MEMBER_ADDRESS_DEFAULT_ISNOTDEL = "10021" # 默认地址不能删除
MEMBER_STATUS_ISNOTDEL = "10022" # 账号为启用状态,不能删除
MEMBER_ROLE_ISNULL = "10023" # 角色不存在
MEMBER_ROLE_ISLINK_DEL = "10024" # 角色下存在关联账户
MEMBER_ROLE_REPEAT = "10025" # 角色重复
LOGIN_ACCOUNT_IS_NO_LOGIN = "10026" # 账号状态为停用
FINDPWD_TYPE_ISNULL = "10027" # 找回 / 修改类型为空
COMPANY_MODMOBILE_ISLOSE = "10028" # 修改手机地址已失效
COMPANY_MODEMAIL_ISLOSE = "10029" # 修改邮箱地址已失效
COMPANY_EMAIL_NOTREGISTER = "10030" # 邮箱在系统中不存在
COMPANY_ACCOUNT_ISHAVE = "10031" # 该帐号已被占用
COMPANY_CHECKCODE_ISNULL = "10032" # 推荐码为空
COMPANY_CHECKCODE_ISLOSE = "10033" # 推荐码无效
COMPANY_CHECKCODE_ISHAVE = "10034" # 推荐码已使用
MEMBER_EMPLOYEE_STORE_ISNULL = "10035" # 账户下未赋予店铺权限
MEMBER_STORE_AUTH_ERROR = "10036" # 店铺授权错误
MEMBER_SUBACCOUNT_NOTREGISTER = "10037" # 子帐号不存在
MEMBER_EMPLOYEE_ROLE_ISNULL = "10038" # 账户下未赋予角色权限
MEMBER_SUBACCOUNT_ISNULL = "10039" # 子帐号为空
UPDATE_SUBACCOUNTSTATUS_TYPE_ISNULL = "10040" # 修改子帐号状态,类型为空
UPDATE_SUBACCOUNTSTATUS_SUBACCOUNTID_ISNULL = "10041" # 修改子帐号状态,子帐号ID为空
MEMBER_SUBACCOUNTID_ISNULL = "10042" # 子帐号ID为空
MEMBER_ACCOUNT_NOAUTHORITY = "10043" # 此帐号没有操作权限
MEMBER_ROLEID_ISNULL = "10044" # 角色ID为空
# 授权令牌信息返回码 以2开头标识 ** /
COMPANY_BASIC_TOKEN_ISNULL = "20001" # 授权令牌为空
COMPANY_BASIC_TOKEN_ERR = "20002" # 授权令牌错误
COMPANY_BASIC_TOKEN_EXPIRE = "20003" # 授权令牌过期,请重新登录
COMPANY_STORE_ISHAVE1 = "20004" # 该站点下已存在店铺
SENDMESSAGE_TEMPLATE_ERR = "20005" # 获取短信模板失败
COMPANY_STORE_ISHAVE = "20006" # 店铺已存在
COMPANY_STORE_TOKEN_ISHAVE = "20007" # 授权信息已授权,请联系管理员
LOGIN_PASSWORD_ERROR = "20008" # 原密码错误
IMAGE_UPLOAD_SIZE_ERROR = "20009" # 图片大小超出限制
IMAGE_UPLOAD_TYPE_ERROR = "20012" # 图片类型错误
# 供应链管理信息返回码 以3开头标识 ** /
# 订单任务信息返回码 以4开头标识 ** /
# 订单 以5开头标识 ** /
# License授权 ** /
LICENSE_ERROR = "60001" # license错误
# 案场稽核 ** /
CASE_PROJECT_IS_NULL = "80000" # 项目未找到
UPLOAD_FILE_DATA_ERROR = "80001" # 导入数据错误
NO_FIND_CASE_FACE_SEARCH = "80002" # 未匹配到到访信息
CASE_PROJECT_IS_NOT_NULL = "80003" # 项目重复
CASE_PROJECT_SERVER_REPEAT = "80004" # 项目前端服务器重复
CASE_PROJECT_INTEGRATED_REPEAT = "80005" # 项目一体机重复
CASE_PROJECT_CAMERA_REPEAT = "80006" # 项目摄像头重复
CASE_PROJECT_SERVER_FORMAT_ERROR = "80007" # 项目前端服务器录入格式错误
CASE_PROJECT_INTEGRATED_FORMAT_ERROR = "80008" # 项目一体机录入格式错误
CASE_PROJECT_CAMERA_FORMAT_ERROR = "80009" # 项目摄像头录入格式错误
CASE_PROJECT_AUTHORIZE_ERROR = "80010" # 项目授权码错误
CASE_PROJECT_AUTHORIZE_IS_USE = "80011" # 项目授权码已使用
| 26.11465 | 76 | 0.682927 | class CodeConstant():
SYSTEM_ERROR = "系统错误,请与管理员联系"
REQUEST_FAILUE = "请求失败"
FILE_UPLOAD_FAILUE = "文件上传失败"
FILE_DELETE_FAILUE = "文件删除失败"
CODE_000 = "000"
CODE_001 = "001"
CODE_002 = "002"
CODE_003 = "003"
REQUEST_SUCCESS = "000"
REQUEST_FAIL = "001"
COMMON_ILIEGAL_REQUEST = "002"
COMMON_NON_MAIN_ACCOUNT_REQUEST = "003"
COMMON_INTERFACE_ERROR = "999"
REGISTER_ISHAVE = "10001"
RREGISTER_ISNULL = "10002"
REGISTER_PWD_ISNULL = "10003"
RREGISTER_CODE_ISNULL = "10004"
RREGISTER_CODE_ERROR = "10005"
LOGIN_ACCOUNT_ISNULL = "10006"
LOGIN_ACCOUNT_ISNOTHAVE = "10007"
LOGIN_ACCOUNT_ERROR = "10008"
SENDMESSAGE_MOBILE_ISNULL = "10009"
MOBILE_ISHAVE = "10010"
COMPANY_BASIC_ISNOTHAVE = "10011"
STORE_ID_ISNULL = "10012"
FINDPWD_PWD_NOT_AS = "10013"
COMPANY_MOBILE_BINGING = "10014"
COMPANY_EMAIL_BINGING = "10015"
COMPANY_MOBILE_NOTREGISTER = "10016"
COMPANY_CODE_ISNULL = "10017"
CODE_PIC_ERR = "10018"
COMPANY_EMAIL_ISNULL = "10019"
COMPANY_MODPWD_ISLOSE = "10020"
MEMBER_ADDRESS_DEFAULT_ISNOTDEL = "10021"
MEMBER_STATUS_ISNOTDEL = "10022"
MEMBER_ROLE_ISNULL = "10023"
MEMBER_ROLE_ISLINK_DEL = "10024"
MEMBER_ROLE_REPEAT = "10025"
LOGIN_ACCOUNT_IS_NO_LOGIN = "10026"
FINDPWD_TYPE_ISNULL = "10027"
COMPANY_MODMOBILE_ISLOSE = "10028"
COMPANY_MODEMAIL_ISLOSE = "10029"
COMPANY_EMAIL_NOTREGISTER = "10030"
COMPANY_ACCOUNT_ISHAVE = "10031"
COMPANY_CHECKCODE_ISNULL = "10032"
COMPANY_CHECKCODE_ISLOSE = "10033"
COMPANY_CHECKCODE_ISHAVE = "10034"
MEMBER_EMPLOYEE_STORE_ISNULL = "10035"
MEMBER_STORE_AUTH_ERROR = "10036"
MEMBER_SUBACCOUNT_NOTREGISTER = "10037"
MEMBER_EMPLOYEE_ROLE_ISNULL = "10038"
MEMBER_SUBACCOUNT_ISNULL = "10039"
UPDATE_SUBACCOUNTSTATUS_TYPE_ISNULL = "10040"
UPDATE_SUBACCOUNTSTATUS_SUBACCOUNTID_ISNULL = "10041"
MEMBER_SUBACCOUNTID_ISNULL = "10042"
MEMBER_ACCOUNT_NOAUTHORITY = "10043"
MEMBER_ROLEID_ISNULL = "10044"
COMPANY_BASIC_TOKEN_ISNULL = "20001"
COMPANY_BASIC_TOKEN_ERR = "20002"
COMPANY_BASIC_TOKEN_EXPIRE = "20003"
COMPANY_STORE_ISHAVE1 = "20004"
SENDMESSAGE_TEMPLATE_ERR = "20005"
COMPANY_STORE_ISHAVE = "20006"
COMPANY_STORE_TOKEN_ISHAVE = "20007"
LOGIN_PASSWORD_ERROR = "20008"
IMAGE_UPLOAD_SIZE_ERROR = "20009"
IMAGE_UPLOAD_TYPE_ERROR = "20012"
LICENSE_ERROR = "60001"
CASE_PROJECT_IS_NULL = "80000"
UPLOAD_FILE_DATA_ERROR = "80001"
NO_FIND_CASE_FACE_SEARCH = "80002"
CASE_PROJECT_IS_NOT_NULL = "80003"
CASE_PROJECT_SERVER_REPEAT = "80004"
CASE_PROJECT_INTEGRATED_REPEAT = "80005"
CASE_PROJECT_CAMERA_REPEAT = "80006"
CASE_PROJECT_SERVER_FORMAT_ERROR = "80007"
CASE_PROJECT_INTEGRATED_FORMAT_ERROR = "80008"
CASE_PROJECT_CAMERA_FORMAT_ERROR = "80009"
CASE_PROJECT_AUTHORIZE_ERROR = "80010"
CASE_PROJECT_AUTHORIZE_IS_USE = "80011"
| true | true |
f73b315ad6d4a2584d59e7f836b3669d55a2e359 | 3,938 | py | Python | venv/lib/python2.7/site-packages/ansible/modules/cloud/azure/azure_rm_resourcegroup_facts.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/ansible/modules/cloud/azure/azure_rm_resourcegroup_facts.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/ansible/modules/cloud/azure/azure_rm_resourcegroup_facts.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: azure_rm_resourcegroup_facts
version_added: "2.1"
short_description: Get resource group facts.
description:
- Get facts for a specific resource group or all resource groups.
options:
name:
description:
- Limit results to a specific resource group.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Get facts for one resource group
azure_rm_resourcegroup_facts:
name: Testing
- name: Get facts for all resource groups
azure_rm_resourcegroup_facts:
- name: Get facts by tags
azure_rm_resourcegroup_facts:
tags:
- testing
- foo:bar
'''
RETURN = '''
azure_resourcegroups:
description: List of resource group dicts.
returned: always
type: list
example: [{
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing",
"location": "westus",
"name": "Testing",
"properties": {
"provisioningState": "Succeeded"
},
"tags": {
"delete": "never",
"testing": "testing"
}
}]
'''
try:
from msrestazure.azure_exceptions import CloudError
except:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_OBJECT_CLASS = 'ResourceGroup'
class AzureRMResourceGroupFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_resourcegroups=[])
)
self.name = None
self.tags = None
super(AzureRMResourceGroupFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name:
self.results['ansible_facts']['azure_resourcegroups'] = self.get_item()
else:
self.results['ansible_facts']['azure_resourcegroups'] = self.list_items()
return self.results
def get_item(self):
self.log('Get properties for {0}'.format(self.name))
item = None
result = []
try:
item = self.rm_client.resource_groups.get(self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
return result
def list_items(self):
self.log('List all items')
try:
response = self.rm_client.resource_groups.list()
except CloudError as exc:
self.fail("Failed to list all items - {0}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def main():
AzureRMResourceGroupFacts()
if __name__ == '__main__':
main()
| 25.082803 | 93 | 0.599543 |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: azure_rm_resourcegroup_facts
version_added: "2.1"
short_description: Get resource group facts.
description:
- Get facts for a specific resource group or all resource groups.
options:
name:
description:
- Limit results to a specific resource group.
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Get facts for one resource group
azure_rm_resourcegroup_facts:
name: Testing
- name: Get facts for all resource groups
azure_rm_resourcegroup_facts:
- name: Get facts by tags
azure_rm_resourcegroup_facts:
tags:
- testing
- foo:bar
'''
RETURN = '''
azure_resourcegroups:
description: List of resource group dicts.
returned: always
type: list
example: [{
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing",
"location": "westus",
"name": "Testing",
"properties": {
"provisioningState": "Succeeded"
},
"tags": {
"delete": "never",
"testing": "testing"
}
}]
'''
try:
from msrestazure.azure_exceptions import CloudError
except:
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_OBJECT_CLASS = 'ResourceGroup'
class AzureRMResourceGroupFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_resourcegroups=[])
)
self.name = None
self.tags = None
super(AzureRMResourceGroupFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name:
self.results['ansible_facts']['azure_resourcegroups'] = self.get_item()
else:
self.results['ansible_facts']['azure_resourcegroups'] = self.list_items()
return self.results
def get_item(self):
self.log('Get properties for {0}'.format(self.name))
item = None
result = []
try:
item = self.rm_client.resource_groups.get(self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
return result
def list_items(self):
self.log('List all items')
try:
response = self.rm_client.resource_groups.list()
except CloudError as exc:
self.fail("Failed to list all items - {0}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def main():
AzureRMResourceGroupFacts()
if __name__ == '__main__':
main()
| true | true |
f73b3191fdb4783e101b49dac92aa8396e043e85 | 1,811 | py | Python | Conteudo das Aulas/125/125.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | Conteudo das Aulas/125/125.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | Conteudo das Aulas/125/125.py | cerberus707/lab-python | ebba3c9cde873d70d4bb61084f79ce30b7f9e047 | [
"Apache-2.0"
] | null | null | null | print("Propriedades e Descritores")
input()
print("Propriedades - Permitem gerenciar a criação")
print("e manipulação de atributos de uma da classe")
print("Semelhante aos métodos __getattr__, __setattr")
print("e __getattribute__ porem menos genéricos")
input()
print("Exemplo")
class Pessoa(object):
def __init__(self, nome):
self._nome = nome
def getNome(self):
print('Obtendo...')
return self._nome
def setNome(self, valor):
print('Modificando...')
self._nome = valor
def delNome(self):
print('Removendo...')
del self._name
nome = property(getNome, setNome, delNome, "Documentação da propriedade nome")
bob = Pessoa('Bob Smith')
print(bob.nome)
bob.nome = 'Robert Smith'
print(bob.nome)
print('-'*20)
sue = Pessoa('Sue Jones')
print(sue.nome)
print(Pessoa.nome.__doc__)
input()
print("Descritores - Funcionam como propriedades")
print("entretanto os métodos get, set, del e a descrição")
print("são todos feitos por uma classe específica com")
print("protocolo bem definido")
input()
class Nome(object):
"Documentação da propriedade nome"
def __get__(self, instancia, dono):
print('Obtendo...')
return instancia._nome
def __set__(self, instancia, valor):
print('Modificando...')
instancia._nome = valor
def __delete__(self, instancia):
print('Removendo...')
del instancia._nome
class Pessoa(object):
def __init__(self, nome):
self._nome = nome
nome = Nome()
nome = Nome()
bob = Pessoa('Bob Smith')
print(bob.nome)
bob.nome = 'Robert Smith'
print(bob.nome)
del bob.nome
print('-'*20)
sue = Pessoa('Sue Jones')
print(sue.nome)
print(Nome.__doc__)
input()
print("A função 'property' em python é usada para criar")
print("um descritor")
input()
| 24.146667 | 82 | 0.673661 | print("Propriedades e Descritores")
input()
print("Propriedades - Permitem gerenciar a criação")
print("e manipulação de atributos de uma da classe")
print("Semelhante aos métodos __getattr__, __setattr")
print("e __getattribute__ porem menos genéricos")
input()
print("Exemplo")
class Pessoa(object):
def __init__(self, nome):
self._nome = nome
def getNome(self):
print('Obtendo...')
return self._nome
def setNome(self, valor):
print('Modificando...')
self._nome = valor
def delNome(self):
print('Removendo...')
del self._name
nome = property(getNome, setNome, delNome, "Documentação da propriedade nome")
bob = Pessoa('Bob Smith')
print(bob.nome)
bob.nome = 'Robert Smith'
print(bob.nome)
print('-'*20)
sue = Pessoa('Sue Jones')
print(sue.nome)
print(Pessoa.nome.__doc__)
input()
print("Descritores - Funcionam como propriedades")
print("entretanto os métodos get, set, del e a descrição")
print("são todos feitos por uma classe específica com")
print("protocolo bem definido")
input()
class Nome(object):
def __get__(self, instancia, dono):
print('Obtendo...')
return instancia._nome
def __set__(self, instancia, valor):
print('Modificando...')
instancia._nome = valor
def __delete__(self, instancia):
print('Removendo...')
del instancia._nome
class Pessoa(object):
def __init__(self, nome):
self._nome = nome
nome = Nome()
nome = Nome()
bob = Pessoa('Bob Smith')
print(bob.nome)
bob.nome = 'Robert Smith'
print(bob.nome)
del bob.nome
print('-'*20)
sue = Pessoa('Sue Jones')
print(sue.nome)
print(Nome.__doc__)
input()
print("A função 'property' em python é usada para criar")
print("um descritor")
input()
| true | true |
f73b32cf54ce18fc20be898f47d908c0b8ec30cf | 46,846 | py | Python | demisto_sdk/commands/create_artifacts/content_artifacts_creator.py | guiguitodelperuu/demisto-sdk | 3eb0206593bc955a64c6594d717c04e52e254e1d | [
"MIT"
] | 42 | 2019-11-07T13:02:00.000Z | 2022-03-29T03:39:04.000Z | demisto_sdk/commands/create_artifacts/content_artifacts_creator.py | guiguitodelperuu/demisto-sdk | 3eb0206593bc955a64c6594d717c04e52e254e1d | [
"MIT"
] | 1,437 | 2019-11-07T13:02:25.000Z | 2022-03-31T12:48:11.000Z | demisto_sdk/commands/create_artifacts/content_artifacts_creator.py | guiguitodelperuu/demisto-sdk | 3eb0206593bc955a64c6594d717c04e52e254e1d | [
"MIT"
] | 46 | 2019-12-09T21:44:30.000Z | 2022-03-24T17:36:45.000Z | # -*- coding: utf-8 -*-
import logging
import os
import re
import sys
import time
from concurrent.futures import as_completed
from contextlib import contextmanager
from shutil import make_archive, rmtree
from typing import Callable, Dict, List, Optional, Union
from packaging.version import parse
from pebble import ProcessFuture, ProcessPool
from wcmatch.pathlib import BRACE, EXTMATCH, NEGATE, NODIR, SPLIT, Path
from demisto_sdk.commands.common.constants import (
BASE_PACK, CLASSIFIERS_DIR, CONTENT_ITEMS_DISPLAY_FOLDERS, DASHBOARDS_DIR,
DOCUMENTATION_DIR, GENERIC_DEFINITIONS_DIR, GENERIC_FIELDS_DIR,
GENERIC_MODULES_DIR, GENERIC_TYPES_DIR, INCIDENT_FIELDS_DIR,
INCIDENT_TYPES_DIR, INDICATOR_FIELDS_DIR, INDICATOR_TYPES_DIR,
INTEGRATIONS_DIR, LAYOUTS_DIR, PACKS_DIR, PLAYBOOKS_DIR,
PRE_PROCESS_RULES_DIR, RELEASE_NOTES_DIR, REPORTS_DIR, SCRIPTS_DIR,
TEST_PLAYBOOKS_DIR, TOOLS_DIR, WIDGETS_DIR, ContentItems)
from demisto_sdk.commands.common.content import (Content, ContentError,
ContentFactoryError, Pack)
from demisto_sdk.commands.common.content.objects.pack_objects import (
JSONContentObject, Script, TextObject, YAMLContentObject,
YAMLContentUnifiedObject)
from demisto_sdk.commands.common.tools import arg_to_list
from .artifacts_report import ArtifactsReport, ObjectReport
####################
# Global variables #
####################
# First server version that is marketplace-enabled; used as a cutoff when
# deciding how packs are packaged.
FIRST_MARKETPLACE_VERSION = parse('6.0.0')
# Packs skipped entirely during artifact creation.
IGNORED_PACKS = ['ApiModules']
# Test playbooks under this sub-directory are excluded from test artifacts.
IGNORED_TEST_PLAYBOOKS_DIR = 'Deprecated'
# Union of every pack object type this module can dump to the artifacts dirs.
ContentObject = Union[YAMLContentUnifiedObject, YAMLContentObject, JSONContentObject, TextObject]
logger = logging.getLogger('demisto-sdk')
# Process exit codes returned by ArtifactsManager.create_content_artifacts().
EX_SUCCESS = 0
EX_FAIL = 1
##############
# Main logic #
##############
class ArtifactsManager:
    """Coordinates creation of all content artifacts (content_new, content_test,
    content_packs, all_content and uploadable_packs) for a content repository."""

    def __init__(self, artifacts_path: str, zip: bool, packs: bool, content_version: str, suffix: str,
                 cpus: int, id_set_path: str = '', pack_names: str = 'all', signature_key: str = '',
                 sign_directory: Path = None, remove_test_playbooks: bool = True):
        """ Content artifacts configuration

        Args:
            artifacts_path: existing destination directory for creating artifacts.
            zip: True for zip all content artifacts to 3 different zip files in same structure else False.
            packs: create only content_packs artifacts if True.
            content_version: release content version.
            suffix: suffix to add all file we creates.
            cpus: available cpus in the computer.
            id_set_path: the full path of id_set.json.
            pack_names: Packs to create artifacts for.
            signature_key: Base64 encoded signature key used for signing packs.
            sign_directory: Path to the signDirectory executable file.
            remove_test_playbooks: Should remove test playbooks from content packs or not.
        """
        # options arguments
        self.artifacts_path = Path(artifacts_path)
        self.zip_artifacts = zip
        self.only_content_packs = packs
        self.content_version = content_version
        self.suffix = suffix
        self.cpus = cpus
        self.id_set_path = id_set_path
        self.pack_names = arg_to_list(pack_names)  # comma-separated string -> list of pack names
        self.signature_key = signature_key
        self.signDirectory = sign_directory
        self.remove_test_playbooks = remove_test_playbooks
        # run related arguments - destination directory per artifact flavor
        self.content_new_path = self.artifacts_path / 'content_new'
        self.content_test_path = self.artifacts_path / 'content_test'
        self.content_packs_path = self.artifacts_path / 'content_packs'
        self.content_all_path = self.artifacts_path / 'all_content'
        self.content_uploadable_zips_path = self.artifacts_path / 'uploadable_packs'
        # inits
        self.content = Content.from_cwd()
        # Timestamp of this run's start; files with a newer mtime found at a dump
        # destination are treated as duplicates (see dump_link_files).
        self.execution_start = time.time()
        self.packs = self.content.packs
        self.exit_code = EX_SUCCESS

    def create_content_artifacts(self) -> int:
        """Create all requested artifacts in parallel and return the process exit code."""
        with ArtifactsDirsHandler(self), ProcessPoolHandler(self) as pool:
            futures: List[ProcessFuture] = []
            # content/Packs
            futures.extend(dump_packs(self, pool))
            # content/TestPlaybooks
            if not self.remove_test_playbooks:
                futures.append(pool.schedule(dump_tests_conditionally, args=(self,)))
            # content/content-descriptor.json
            futures.append(pool.schedule(dump_content_descriptor, args=(self,)))
            # content/Documentation/doc-*.json
            futures.append(pool.schedule(dump_content_documentations, args=(self,)))
            # Wait for all futures to be finished
            wait_futures_complete(futures, self)
            # Add suffix
            suffix_handler(self)
        # Best-effort cleanup of the temporary signing key file, if one was written.
        if os.path.exists('keyfile'):
            os.remove('keyfile')
        logger.info(f"\nExecution time: {time.time() - self.execution_start} seconds")
        return self.exit_code

    def get_relative_pack_path(self, content_object: ContentObject):
        """
        Args:
            content_object: the object to get the relative path for

        Returns:
            the path of the given object relative from the pack directory, for example HelloWorld/Scripts/some_script
        """
        return content_object.path.relative_to(self.content.path / PACKS_DIR)

    def get_base_path(self) -> Path:
        """
        Returns:
            the path that all artifacts are relative to
        """
        return self.content.path

    def get_dir_to_delete(self):
        """
        Returns:
            list of directories to delete after artifacts was created
        """
        return [self.content_test_path, self.content_new_path, self.content_packs_path, self.content_all_path]
class ContentItemsHandler:
    """Collects pack content objects into the per-type lists that populate the pack
    metadata ``contentItems`` section, and tracks the minimal supported server version."""

    def __init__(self):
        # Highest from_version seen across handled items - becomes serverMinVersion.
        self.server_min_version = parse('1.0.0')
        self.content_items: Dict[ContentItems, List] = {
            ContentItems.SCRIPTS: [],
            ContentItems.PLAYBOOKS: [],
            ContentItems.INTEGRATIONS: [],
            ContentItems.INCIDENT_FIELDS: [],
            ContentItems.INCIDENT_TYPES: [],
            ContentItems.DASHBOARDS: [],
            ContentItems.INDICATOR_FIELDS: [],
            ContentItems.REPORTS: [],
            ContentItems.INDICATOR_TYPES: [],
            ContentItems.LAYOUTS: [],
            ContentItems.PRE_PROCESS_RULES: [],
            ContentItems.CLASSIFIERS: [],
            ContentItems.WIDGETS: [],
            ContentItems.GENERIC_FIELDS: [],
            ContentItems.GENERIC_TYPES: [],
            ContentItems.GENERIC_MODULES: [],
            ContentItems.GENERIC_DEFINITIONS: []
        }
        # Maps an entity directory name to the method that records its items.
        self.content_folder_name_to_func: Dict[str, Callable] = {
            SCRIPTS_DIR: self.add_script_as_content_item,
            PLAYBOOKS_DIR: self.add_playbook_as_content_item,
            INTEGRATIONS_DIR: self.add_integration_as_content_item,
            INCIDENT_FIELDS_DIR: self.add_incident_field_as_content_item,
            INCIDENT_TYPES_DIR: self.add_incident_type_as_content_item,
            DASHBOARDS_DIR: self.add_dashboard_as_content_item,
            INDICATOR_FIELDS_DIR: self.add_indicator_field_as_content_item,
            INDICATOR_TYPES_DIR: self.add_indicator_type_as_content_item,
            REPORTS_DIR: self.add_report_as_content_item,
            LAYOUTS_DIR: self.add_layout_as_content_item,
            PRE_PROCESS_RULES_DIR: self.add_pre_process_rules_as_content_item,
            CLASSIFIERS_DIR: self.add_classifier_as_content_item,
            WIDGETS_DIR: self.add_widget_as_content_item,
            GENERIC_TYPES_DIR: self.add_generic_type_as_content_item,
            GENERIC_FIELDS_DIR: self.add_generic_field_as_content_item,
            GENERIC_MODULES_DIR: self.add_generic_module_as_content_item,
            GENERIC_DEFINITIONS_DIR: self.add_generic_definition_as_content_item
        }

    def handle_content_item(self, content_object: ContentObject):
        """Verifies the validity of the content object and parses it to the correct entities list.

        Args:
            content_object (ContentObject): The object to add to entities list.
        """
        content_object_directory = content_object.path.parts[-3]
        if content_object_directory not in self.content_folder_name_to_func:
            # In the case where the content object is nested directly in the entities directory (Playbooks for example).
            content_object_directory = content_object.path.parts[-2]
        if content_object.to_version < FIRST_MARKETPLACE_VERSION:
            return
        # reputation in old format aren't supported in 6.0.0 server version.
        # Bugfix: re.match takes (pattern, string) - the arguments were previously swapped
        # (filename passed as the pattern), which made this check reject every indicator type.
        if content_object_directory == INDICATOR_TYPES_DIR and not re.match(r'reputation-.*\.json',
                                                                            content_object.path.name):
            return
        # skip content items that are not displayed in contentItems
        if content_object_directory not in CONTENT_ITEMS_DISPLAY_FOLDERS:
            return
        self.server_min_version = max(self.server_min_version, content_object.from_version)
        self.content_folder_name_to_func[content_object_directory](content_object)

    def add_script_as_content_item(self, content_object: ContentObject):
        """Record a script's name/description/tags."""
        self.content_items[ContentItems.SCRIPTS].append({
            'name': content_object.get('name', ''),
            'description': content_object.get('comment', ''),
            'tags': content_object.get('tags', [])
        })

    def add_playbook_as_content_item(self, content_object: ContentObject):
        """Record a playbook's name/description."""
        self.content_items[ContentItems.PLAYBOOKS].append({
            'name': content_object.get('name', ''),
            'description': content_object.get('description', ''),
        })

    def add_integration_as_content_item(self, content_object: ContentObject):
        """Record an integration's display data and its commands."""
        self.content_items[ContentItems.INTEGRATIONS].append({
            'name': content_object.get('display', ""),
            'description': content_object.get('description', ''),
            'category': content_object.get('category', ''),
            'commands': [
                {
                    'name': command.get('name', ''),
                    'description': command.get('description', '')
                }
                for command in content_object.script.get('commands', [])]
        })

    def add_incident_field_as_content_item(self, content_object: ContentObject):
        """Record an incident field's name/type/description."""
        self.content_items[ContentItems.INCIDENT_FIELDS].append({
            'name': content_object.get('name', ''),
            'type': content_object.get('type', ''),
            'description': content_object.get('description', '')
        })

    def add_incident_type_as_content_item(self, content_object: ContentObject):
        """Record an incident type, including its SLA timing fields."""
        self.content_items[ContentItems.INCIDENT_TYPES].append({
            'name': content_object.get('name', ''),
            'playbook': content_object.get('playbookId', ''),
            'closureScript': content_object.get('closureScript', ''),
            'hours': int(content_object.get('hours', 0)),
            'days': int(content_object.get('days', 0)),
            'weeks': int(content_object.get('weeks', 0))
        })

    def add_dashboard_as_content_item(self, content_object: ContentObject):
        """Record a dashboard's name."""
        self.content_items[ContentItems.DASHBOARDS].append({
            'name': content_object.get('name', '')
        })

    def add_indicator_field_as_content_item(self, content_object: ContentObject):
        """Record an indicator field's name/type/description."""
        self.content_items[ContentItems.INDICATOR_FIELDS].append({
            'name': content_object.get('name', ''),
            'type': content_object.get('type', ''),
            'description': content_object.get('description', '')
        })

    def add_indicator_type_as_content_item(self, content_object: ContentObject):
        """Record an indicator (reputation) type's details and scripts."""
        self.content_items[ContentItems.INDICATOR_TYPES].append({
            'details': content_object.get('details', ''),
            'reputationScriptName': content_object.get('reputationScriptName', ''),
            'enhancementScriptNames': content_object.get('enhancementScriptNames', [])
        })

    def add_report_as_content_item(self, content_object: ContentObject):
        """Record a report's name/description."""
        self.content_items[ContentItems.REPORTS].append({
            'name': content_object.get('name', ''),
            'description': content_object.get('description', '')
        })

    def add_layout_as_content_item(self, content_object: ContentObject):
        """Record a layout; the description key is included only when present."""
        if content_object.get('description') is not None:
            self.content_items[ContentItems.LAYOUTS].append({
                'name': content_object.get('name', ''),
                'description': content_object.get('description')
            })
        else:
            self.content_items[ContentItems.LAYOUTS].append({
                'name': content_object.get('name', '')
            })

    def add_pre_process_rules_as_content_item(self, content_object: ContentObject):
        """Record a pre-process rule, falling back to its id when it has no name."""
        self.content_items[ContentItems.PRE_PROCESS_RULES].append({
            'name': content_object.get('name') or content_object.get('id', ''),
            'description': content_object.get('description', ''),
        })

    def add_classifier_as_content_item(self, content_object: ContentObject):
        """Record a classifier, falling back to its id when it has no name."""
        self.content_items[ContentItems.CLASSIFIERS].append({
            'name': content_object.get('name') or content_object.get('id', ''),
            'description': content_object.get('description', '')
        })

    def add_widget_as_content_item(self, content_object: ContentObject):
        """Record a widget's name and its data/widget types."""
        self.content_items[ContentItems.WIDGETS].append({
            'name': content_object.get('name', ''),
            'dataType': content_object.get('dataType', ''),
            'widgetType': content_object.get('widgetType', '')
        })

    def add_generic_field_as_content_item(self, content_object: ContentObject):
        """Record a generic field's name/type/description."""
        self.content_items[ContentItems.GENERIC_FIELDS].append({
            'name': content_object.get('name', ''),
            'type': content_object.get('type', ''),
            'description': content_object.get('description', '')
        })

    def add_generic_type_as_content_item(self, content_object: ContentObject):
        """Record a generic type's name/details."""
        self.content_items[ContentItems.GENERIC_TYPES].append({
            'name': content_object.get('name', ''),
            'details': content_object.get('details', ''),
        })

    def add_generic_definition_as_content_item(self, content_object: ContentObject):
        """Record a generic definition's name/description."""
        self.content_items[ContentItems.GENERIC_DEFINITIONS].append({
            'name': content_object.get('name', ''),
            'description': content_object.get('description', '')
        })

    def add_generic_module_as_content_item(self, content_object: ContentObject):
        """Record a generic module's name/description."""
        self.content_items[ContentItems.GENERIC_MODULES].append({
            'name': content_object.get('name', ''),
            'description': content_object.get('description', '')
        })
@contextmanager
def ProcessPoolHandler(artifact_manager: ArtifactsManager) -> ProcessPool:
    """ Process pool handler which terminates all worker processes in case of an exception.

    Args:
        artifact_manager: Artifacts manager object.

    Yields:
        ProcessPool: Pebble process pool.
    """
    with ProcessPool(max_workers=artifact_manager.cpus, initializer=child_mute) as pool:
        try:
            yield pool
        except (KeyboardInterrupt, Exception) as error:
            if isinstance(error, KeyboardInterrupt):
                logger.info("\nCTRL+C Pressed!\nGracefully release all resources due to keyboard interrupt...")
            else:
                logger.exception(error)
                logger.error("Gracefully release all resources due to Error...")
            # Abort outstanding work before propagating the failure.
            pool.stop()
            pool.join()
            raise
        else:
            # Normal completion: let scheduled tasks drain, then wait for workers.
            pool.close()
            pool.join()
        finally:
            # Best-effort removal of the temporary signing key file.
            if os.path.exists('keyfile'):
                os.remove('keyfile')
def wait_futures_complete(futures: List[ProcessFuture], artifact_manager: ArtifactsManager):
    """Block until every future finishes; log reports and re-raise any failure.

    Args:
        artifact_manager: Artifacts manager object.
        futures: futures to wait for.

    Raises:
        Exception: Re-raises any exception caught from a future for further cleanups.
    """
    base_path = artifact_manager.get_base_path()
    for completed_future in as_completed(futures):
        try:
            outcome = completed_future.result()
            if isinstance(outcome, ArtifactsReport):
                logger.info(outcome.to_str(base_path))
        except (ContentError, DuplicateFiles, ContentFactoryError) as known_error:
            # Known artifact errors carry a pre-formatted message.
            logger.error(known_error.msg)
            raise
        except Exception as unexpected_error:
            logger.exception(unexpected_error)
            raise
#####################################################
# Files include rules functions (Version, Type etc) #
#####################################################
def is_in_content_packs(content_object: ContentObject) -> bool:
    """ Rules content_packs:
        1. to_version >= First marketplace version.

    Args:
        content_object: Content object as specified in global variable - ContentObject.

    Returns:
        bool: True if object should be included in content_packs artifacts else False.
    """
    supported_to_version = content_object.to_version
    return supported_to_version >= FIRST_MARKETPLACE_VERSION
def is_in_content_test(artifact_manager: ArtifactsManager, content_object: ContentObject) -> bool:
    """Rules content_test:
        1. flag of only packs is off.
        2. Object located in TestPlaybooks directory (*/TestPlaybooks/*).
        3. from_version < First marketplace version.
        4. Path of object is not including global variable - IGNORED_TEST_PLAYBOOKS_DIR

    Args:
        artifact_manager: Artifacts manager object.
        content_object: Content object as specified in global variable - ContentObject.

    Returns:
        bool: True if object should be included in content_test artifacts else False.
    """
    if artifact_manager.only_content_packs:
        return False
    path_parts = content_object.path.parts
    if TEST_PLAYBOOKS_DIR not in path_parts:
        return False
    if not (content_object.from_version < FIRST_MARKETPLACE_VERSION):
        return False
    return IGNORED_TEST_PLAYBOOKS_DIR not in path_parts
def is_in_content_new(artifact_manager: ArtifactsManager, content_object: ContentObject) -> bool:
    """ Rules content_new:
        1. flag of only packs is off.
        2. Object not located in TestPlaybooks directory (*/TestPlaybooks/*).
        3. from_version < First marketplace version

    Args:
        artifact_manager: Artifacts manager object.
        content_object: Content object as specified in global variable - ContentObject.

    Returns:
        bool: True if object should be included in content_new artifacts else False.
    """
    if artifact_manager.only_content_packs:
        return False
    if TEST_PLAYBOOKS_DIR in content_object.path.parts:
        return False
    return content_object.from_version < FIRST_MARKETPLACE_VERSION
def is_in_content_all(artifact_manager: ArtifactsManager, content_object: ContentObject) -> bool:
    """ Rules content_all:
        1. If in content_new or content_test.

    Args:
        artifact_manager: Artifacts manager object.
        content_object: Content object as specified in global variable - ContentObject.

    Returns:
        bool: True if object should be included in content_all artifacts else False.
    """
    if is_in_content_new(artifact_manager, content_object):
        return True
    return is_in_content_test(artifact_manager, content_object)
############################
# Documentations functions #
############################
def dump_content_documentations(artifact_manager: ArtifactsManager) -> ArtifactsReport:
    """ Dump Documentation/doc-*.json into:
            1. content_new
            2. content_all

    Args:
        artifact_manager: Artifacts manager object.

    Returns:
        ArtifactsReport: ArtifactsReport object.
    """
    report = ArtifactsReport("Documentations:")
    docs_pack_dest = artifact_manager.content_packs_path / BASE_PACK / DOCUMENTATION_DIR
    for documentation in artifact_manager.content.documentations:
        doc_report = ObjectReport(documentation, content_packs=True)
        # Dump once into the Base pack, then hard-link into the other flavors.
        linked_files = documentation.dump(docs_pack_dest)
        if not artifact_manager.only_content_packs:
            doc_report.set_content_new()
            doc_report.set_content_all()
            for destination in (artifact_manager.content_new_path, artifact_manager.content_all_path):
                linked_files = dump_link_files(artifact_manager, documentation, destination, linked_files)
        report.append(doc_report)
    return report
########################
# Descriptor functions #
########################
def dump_content_descriptor(artifact_manager: ArtifactsManager) -> ArtifactsReport:
    """ Dump content/content_descriptor.json into:
            1. content_test
            2. content_new
            3. content_all

    Args:
        artifact_manager: Artifacts manager object.

    Returns:
        ArtifactsReport: ArtifactsReport object.

    Notes:
        1. content_descriptor.json created during build run time.
    """
    report = ArtifactsReport("Content descriptor:")
    if not artifact_manager.only_content_packs and artifact_manager.content.content_descriptor:
        descriptor = artifact_manager.content.content_descriptor
        descriptor_report = ObjectReport(descriptor, content_test=True, content_new=True, content_all=True)
        linked_files: List[Path] = []
        # First destination dumps the descriptor; the rest hard-link the created files.
        for destination in (artifact_manager.content_test_path,
                            artifact_manager.content_new_path,
                            artifact_manager.content_all_path):
            linked_files = dump_link_files(artifact_manager, descriptor, destination, linked_files)
        report.append(descriptor_report)
    return report
##################################
# Content Testplaybook functions #
##################################
def dump_tests_conditionally(artifact_manager: ArtifactsManager) -> ArtifactsReport:
    """ Dump test scripts/playbooks conditionally into:
            1. content_test

    Args:
        artifact_manager: Artifacts manager object.

    Returns:
        ArtifactsReport: ArtifactsReport object.
    """
    report = ArtifactsReport("TestPlaybooks:")
    for test_playbook in artifact_manager.content.test_playbooks:
        test_report = ObjectReport(test_playbook)
        if is_in_content_test(artifact_manager, test_playbook):
            test_report.set_content_test()
            # Dump into content_test, then hard-link the same files into all_content.
            created = dump_link_files(artifact_manager, test_playbook, artifact_manager.content_test_path)
            dump_link_files(artifact_manager, test_playbook, artifact_manager.content_all_path, created)
        report += test_report
    return report
###########################
# Content packs functions #
###########################
def dump_packs(artifact_manager: ArtifactsManager, pool: ProcessPool) -> List[ProcessFuture]:
    """ Create futures which dump conditionally content/Packs.

    Args:
        artifact_manager: Artifacts manager object.
        pool: Process pool to schedule new processes.

    Returns:
        List[ProcessFuture]: List of pebble futures to wait for.
    """
    all_packs = artifact_manager.packs
    if 'all' in artifact_manager.pack_names:
        selected_packs = [pack for name, pack in all_packs.items() if name not in IGNORED_PACKS]
    else:
        # Keep only requested packs that actually exist and are not ignored.
        selected_packs = [all_packs[name] for name in artifact_manager.pack_names
                          if name not in IGNORED_PACKS and name in all_packs]
    return [pool.schedule(dump_pack, args=(artifact_manager, pack)) for pack in selected_packs]
def dump_pack(artifact_manager: ArtifactsManager, pack: Pack) -> ArtifactsReport:
    """ Dumping content/Packs/<pack_id>/ into:
            1. content_test
            2. content_new
            3. content_all
            4. content_packs
            5. uploadable_packs

    Args:
        artifact_manager: Artifacts manager object.
        pack: Pack object.

    Notes:
        1. Include all file object, excluding:
            a. Change_log files (Deprecated).
            b. Integration/Script/Playbook readme (Used for website documentation deployment).
            c. .pack-ignore (Internal only).
            d. .secrets-ignore (Internal only).

    Returns:
        ArtifactsReport: ArtifactsReport object.
    """
    pack_report = ArtifactsReport(f"Pack {pack.id}:")
    pack.metadata.load_user_metadata(pack.id, pack.path.name, pack.path, logger)
    content_items_handler = ContentItemsHandler()
    is_feed_pack = False

    def _dump_items(items, register: bool = True):
        """Dump each item conditionally; optionally register it as a metadata content item."""
        nonlocal pack_report
        for item in items:
            if register:
                content_items_handler.handle_content_item(item)
            pack_report += dump_pack_conditionally(artifact_manager, item)

    # Integrations and playbooks get explicit loops - they also drive feed-pack detection.
    for integration in pack.integrations:
        content_items_handler.handle_content_item(integration)
        is_feed_pack = is_feed_pack or integration.is_feed
        pack_report += dump_pack_conditionally(artifact_manager, integration)
    _dump_items(pack.scripts)
    for playbook in pack.playbooks:
        content_items_handler.handle_content_item(playbook)
        is_feed_pack = is_feed_pack or playbook.get('name', '').startswith('TIM')
        pack_report += dump_pack_conditionally(artifact_manager, playbook)
    # Test playbooks and connections are dumped but never listed as content items.
    _dump_items(pack.test_playbooks, register=False)
    _dump_items(pack.reports)
    _dump_items(pack.layouts)
    _dump_items(pack.pre_process_rules)
    _dump_items(pack.dashboards)
    _dump_items(pack.incident_fields)
    _dump_items(pack.incident_types)
    _dump_items(pack.indicator_fields)
    _dump_items(pack.indicator_types)
    _dump_items(pack.connections, register=False)
    _dump_items(pack.classifiers)
    _dump_items(pack.widgets)
    _dump_items(pack.generic_definitions)
    _dump_items(pack.generic_modules)
    _dump_items(pack.generic_types)
    _dump_items(pack.generic_fields)
    # Release notes and their config files go only into content_packs.
    for release_note in pack.release_notes:
        pack_report += ObjectReport(release_note, content_packs=True)
        release_note.dump(artifact_manager.content_packs_path / pack.id / RELEASE_NOTES_DIR)
    for release_note_config in pack.release_notes_config:
        pack_report += ObjectReport(release_note_config, content_packs=True)
        release_note_config.dump(artifact_manager.content_packs_path / pack.id / RELEASE_NOTES_DIR)
    for tool in pack.tools:
        object_report = ObjectReport(tool, content_packs=True)
        created_files = tool.dump(artifact_manager.content_packs_path / pack.id / TOOLS_DIR)
        if not artifact_manager.only_content_packs:
            object_report.set_content_new()
            dump_link_files(artifact_manager, tool, artifact_manager.content_new_path, created_files)
            object_report.set_content_all()
            dump_link_files(artifact_manager, tool, artifact_manager.content_all_path, created_files)
        pack_report += object_report
    if pack.pack_metadata:
        pack_report += ObjectReport(pack.pack_metadata, content_packs=True)
        pack.pack_metadata.dump(artifact_manager.content_packs_path / pack.id)
    if pack.metadata:
        pack_report += ObjectReport(pack.metadata, content_packs=True)
        pack.metadata.content_items = content_items_handler.content_items
        pack.metadata.server_min_version = pack.metadata.server_min_version or content_items_handler.server_min_version
        if artifact_manager.id_set_path:
            # Dependencies can only be done when id_set file is given.
            pack.metadata.handle_dependencies(pack.path.name, artifact_manager.id_set_path, logger)
        else:
            logger.warning('Skipping dependencies extraction since no id_set file was provided.')
        if is_feed_pack and 'TIM' not in pack.metadata.tags:
            pack.metadata.tags.append('TIM')
        pack.metadata.dump_metadata_file(artifact_manager.content_packs_path / pack.id)
    if pack.readme or pack.contributors:
        if not pack.readme:
            # Contributors without a README: create an empty one so contributor
            # details have a file to be merged into.
            readme_file = os.path.join(pack.path, 'README.md')
            open(readme_file, 'a+').close()
        readme_obj = pack.readme
        readme_obj.contributors = pack.contributors
        pack_report += ObjectReport(readme_obj, content_packs=True)
        readme_obj.dump(artifact_manager.content_packs_path / pack.id)
    if pack.author_image:
        pack_report += ObjectReport(pack.author_image, content_packs=True)
        pack.author_image.dump(artifact_manager.content_packs_path / pack.id)
    return pack_report
def dump_pack_conditionally(artifact_manager: ArtifactsManager, content_object: ContentObject) -> ObjectReport:
    """ Dump pack object by the following logic

    Args:
        artifact_manager: Artifacts manager object.
        content_object: content_object (e.g. Integration/Script/Layout etc)

    Returns:
        ObjectReport: ObjectReport object.
    """
    object_report = ObjectReport(content_object)
    pack_created_files: List[Path] = []  # files created under content_packs, reused as hard-link sources
    test_new_created_files: List[Path] = []  # files created under content_test or content_new
    with content_files_handler(artifact_manager, content_object) as files_to_remove:
        # Content packs filter - When unify also _45.yml created which should be deleted after copy it if needed
        if is_in_content_packs(content_object):
            object_report.set_content_packs()
            # Unify will create *_45.yml files which shouldn't be in content_packs
            pack_created_files.extend(dump_link_files(artifact_manager, content_object,
                                                      artifact_manager.content_packs_path /
                                                      calc_relative_packs_dir(artifact_manager, content_object)))
            # Collecting files *_45.yml which created and need to be removed after execution.
            files_to_remove.extend(
                [created_file for created_file in pack_created_files if created_file.name.endswith('_45.yml')])
        # Content test filter
        if is_in_content_test(artifact_manager, content_object):
            object_report.set_content_test()
            test_new_created_files = dump_link_files(artifact_manager, content_object,
                                                     artifact_manager.content_test_path, pack_created_files)
        # Content new filter
        # NOTE: content_test and content_new are mutually exclusive (an object either is
        # or is not under TestPlaybooks), so at most one of the two assignments above runs.
        if is_in_content_new(artifact_manager, content_object):
            object_report.set_content_new()
            test_new_created_files = dump_link_files(artifact_manager, content_object,
                                                     artifact_manager.content_new_path, pack_created_files)
        # Content all filter
        if is_in_content_all(artifact_manager, content_object):
            object_report.set_content_all()
            dump_link_files(artifact_manager, content_object, artifact_manager.content_all_path, test_new_created_files)
    return object_report
@contextmanager
def content_files_handler(artifact_manager: ArtifactsManager, content_object: ContentObject):
    """ Pre-processing pack, perform the following:
            1. Change content/Packs/Base/Scripts/CommonServerPython.py global variables:
                a. CONTENT_RELEASE_VERSION to given content version flag.
                b. CONTENT_BRANCH_NAME to active branch

        Post-processing pack, perform the following:
            1. Change content/Packs/Base/Scripts/CommonServerPython.py to original state.
            2. Unifier creates *_45.yml files in content_pack by default which is not support due to_version lower than
               NEWEST_SUPPORTED_VERSION, Therefor after copy it to content_new, delete it.

    Args:
        artifact_manager: Command line configuration.
        content_object: content_object (e.g. Integration/Script/Layout etc)

    Yields:
        List[Path]: List of file to be removed after execution.
    """
    files_to_remove: List[Path] = []
    # Only the Base pack's CommonServerPython.py script gets its constants patched.
    is_common_server_python = (BASE_PACK in content_object.path.parts
                               and isinstance(content_object, Script)
                               and content_object.code_path
                               and content_object.code_path.name == 'CommonServerPython.py')
    try:
        if is_common_server_python:
            # Modify CommonServerPython.py global variables
            repo = artifact_manager.content.git()
            active_branch = repo.active_branch if repo else 'master'
            modify_common_server_constants(content_object.code_path, artifact_manager.content_version, active_branch)
        yield files_to_remove
    finally:
        if is_common_server_python:
            # Restore CommonServerPython.py global variables to their placeholders.
            modify_common_server_constants(content_object.code_path, '0.0.0', 'master')
        # Delete yaml which created by Unifier in packs and to_version/toVersion lower than NEWEST_SUPPORTED_VERSION
        for stale_file in files_to_remove:
            stale_file.unlink()
def modify_common_server_constants(code_path: Path, content_version: str, branch_name: Optional[str] = None):
    """ Modify content/Packs/Base/Scripts/CommonServerPython.py global variables:
            a. CONTENT_RELEASE_VERSION to given content version flag.
            b. CONTENT_BRANCH_NAME to active branch

    Args:
        code_path: Packs/Base/Scripts/CommonServerPython.py full code path.
        branch_name: branch name to update in CONTENT_BRANCH_NAME
        content_version: content version to update in CONTENT_RELEASE_VERSION
    """
    # \d+ (not a single \d) supports multi-digit version components (e.g. 20.5.0) and
    # the dots are escaped - the old pattern '\d.\d.\d' let '.' match any character.
    file_content_new = re.sub(r"CONTENT_RELEASE_VERSION = '\d+(?:\.\d+)*'",
                              f"CONTENT_RELEASE_VERSION = '{content_version}'",
                              code_path.read_text())
    # [^']+ matches branch names containing '-', '/' or '.', which the old '\w+'
    # pattern missed - leaving the file un-restorable after building on such a branch.
    file_content_new = re.sub(r"CONTENT_BRANCH_NAME = '[^']+'",
                              f"CONTENT_BRANCH_NAME = '{branch_name}'",
                              file_content_new)
    code_path.write_text(file_content_new)
########################
# Suffix add functions #
########################
def suffix_handler(artifact_manager: ArtifactsManager):
    """ Add suffix to file names exclude:
            1. pack_metadata.json
            2. README.
            3. content_descriptor.json
            4. ReleaseNotes/**

        Include:
            1. *.json
            2. *.(yaml|yml)

    Args:
        artifact_manager: Artifacts manager object.
    """
    if not artifact_manager.suffix:
        return
    files_pattern_to_add_suffix = "!reputations.json|!pack_metadata.json|" \
                                  "!doc-*.json|!content-descriptor.json|*.{json,yml,yaml}"
    glob_flags = BRACE | SPLIT | EXTMATCH | NODIR | NEGATE
    # Same traversal order as before: new, packs, test, all.
    target_dirs = (artifact_manager.content_new_path,
                   artifact_manager.content_packs_path,
                   artifact_manager.content_test_path,
                   artifact_manager.content_all_path)
    for target_dir in target_dirs:
        for file in target_dir.rglob(files_pattern_to_add_suffix, flags=glob_flags):
            # Insert the suffix between the file's stem and its (last) extension.
            file.rename(file.with_name(f'{file.stem}{artifact_manager.suffix}{file.suffix}'))
###########
# Helpers #
###########
class DuplicateFiles(Exception):
    def __init__(self, exiting_file: Path, src: Path):
        """ Exception raised when 2 files with the same name exist in the same directory when creating artifacts.

        Args:
            exiting_file: File already existing in artifacts.
            src: File source which copy or link to same directory.
        """
        self.exiting_file = exiting_file
        self.src = src
        self.msg = f"\nFound duplicate files\n1. {src}\n2. {exiting_file}"
        # Pass the message to Exception so str(e) and tracebacks show it too
        # (previously only the .msg attribute carried the details).
        super().__init__(self.msg)
def dump_link_files(artifact_manager: ArtifactsManager, content_object: ContentObject,
                    dest_dir: Path, created_files: Optional[List[Path]] = None) -> List[Path]:
    """ Dump content object to requested destination dir.

    Due to performance issue if known files already created and dump is done for the same object, This function
    will link files instead of creating the files from scratch (Reduce unify, split etc.)

    Args:
        artifact_manager: Artifacts manager object.
        content_object: Content object.
        dest_dir: Destination dir.
        created_files: Pre-created file (Not mandatory).

    Returns:
        List[Path]: List of new created files.

    Raises:
        DuplicateFiles: Exception occurred if duplicate files exists in the same dir (Protect from override).
    """
    def _raise_on_duplicate(candidate: Path):
        # A file whose mtime predates this run is stale output from a previous run,
        # not a duplicate produced during the current execution.
        if candidate.exists() and candidate.stat().st_mtime >= artifact_manager.execution_start:
            raise DuplicateFiles(candidate, content_object.path)

    new_created_files = []
    if created_files:
        # Fast path: hard-link the already-created files instead of re-dumping.
        for existing in created_files:
            link_target = dest_dir / existing.name
            _raise_on_duplicate(link_target)
            os.link(existing, link_target)
            new_created_files.append(link_target)
    else:
        # First dump of this object.
        dump_target = dest_dir / content_object.normalize_file_name()
        _raise_on_duplicate(dump_target)
        new_created_files.extend(content_object.dump(dest_dir=dest_dir))
    return new_created_files
def calc_relative_packs_dir(artifact_manager: ArtifactsManager, content_object: ContentObject) -> Path:
    """Return the pack-relative directory under which the content object is dumped.

    Objects nested one directory below Integrations/ or Scripts/ (per-object
    sub-directories) are mapped two levels up; everything else one level up.
    """
    relative_path = artifact_manager.get_relative_pack_path(content_object)
    nested_integration = INTEGRATIONS_DIR in relative_path.parts and relative_path.parts[-2] != INTEGRATIONS_DIR
    nested_script = SCRIPTS_DIR in relative_path.parts and relative_path.parts[-2] != SCRIPTS_DIR
    levels_up = 2 if (nested_integration or nested_script) else 1
    for _ in range(levels_up):
        relative_path = relative_path.parent
    return relative_path
def child_mute():
    """Mute child process inorder to keep log clean"""
    # Redirect the worker's stdout to the null device; the handle is intentionally
    # kept open for the lifetime of the child process.
    sys.stdout = open(os.devnull, 'w')
###################################
# Artifacts Directories functions #
###################################
@contextmanager
def ArtifactsDirsHandler(artifact_manager: ArtifactsManager):
    """ Artifacts Directories handler.
    Logic by time line:
        1. Delete artifacts directories if exists.
        2. Create directories.
        3. If any error occurred -> Delete artifacts directories -> Exit.
        4. If finish successfully:
            a. If zip:
                1. Sign packs if needed.
                2. Zip artifacts zip.
                3. Zip packs for uploading.
                4. Delete artifacts directories.
            5. log report.

    Args:
        artifact_manager: Artifacts manager object.
    """
    try:
        delete_dirs(artifact_manager)
        create_dirs(artifact_manager)
        yield
    except (Exception, KeyboardInterrupt):
        # NOTE(review): the error is swallowed here (not re-raised); the failure is
        # surfaced only via artifact_manager.exit_code - confirm this is intended.
        delete_dirs(artifact_manager)
        artifact_manager.exit_code = EX_FAIL
    else:
        if artifact_manager.zip_artifacts:
            sign_packs(artifact_manager)
            zip_packs(artifact_manager)
            zip_dirs(artifact_manager)
            # Once zipped, the raw directories are no longer needed.
            delete_dirs(artifact_manager)
        report_artifacts_paths(artifact_manager)
def delete_dirs(artifact_manager: ArtifactsManager):
    """Delete artifacts directories"""
    existing_dirs = (d for d in artifact_manager.get_dir_to_delete() if d.exists())
    for artifact_dir in existing_dirs:
        rmtree(artifact_dir)
def create_dirs(artifact_manager: ArtifactsManager):
    """Create the artifacts output directories required for this run."""
    if artifact_manager.only_content_packs:
        dirs_to_create = [artifact_manager.content_packs_path]
    else:
        dirs_to_create = [artifact_manager.content_test_path,
                          artifact_manager.content_new_path,
                          artifact_manager.content_packs_path,
                          artifact_manager.content_all_path]
    for artifact_dir in dirs_to_create:
        artifact_dir.mkdir(parents=True)
def zip_dirs(artifact_manager: ArtifactsManager):
    """Zip each artifacts directory in place (producing <dir>.zip next to it)."""
    if artifact_manager.only_content_packs:
        make_archive(artifact_manager.content_packs_path, 'zip', artifact_manager.content_packs_path)
        return
    dirs_to_zip = (artifact_manager.content_test_path, artifact_manager.content_new_path,
                   artifact_manager.content_packs_path, artifact_manager.content_all_path)
    with ProcessPoolHandler(artifact_manager) as pool:
        for artifact_dir in dirs_to_zip:
            pool.schedule(make_archive, args=(artifact_dir, 'zip', artifact_dir))
def zip_packs(artifact_manager: ArtifactsManager):
    """Create an uploadable <pack id>.zip for every pack, in parallel."""
    with ProcessPoolHandler(artifact_manager) as pool:
        for pack in artifact_manager.packs.values():
            source_dir = os.path.join(artifact_manager.content_packs_path, pack.id)
            destination = os.path.join(artifact_manager.content_uploadable_zips_path, pack.id)
            pool.schedule(make_archive, args=(destination, 'zip', source_dir))
def report_artifacts_paths(artifact_manager: ArtifactsManager):
    """Log the destinations of all artifacts created by this run."""
    global logger
    zipped = artifact_manager.zip_artifacts
    # When zipping, the directories were replaced by <dir>.zip files.
    template = "\n\t - {}.zip" if zipped else "\n\t - {}"
    logger.info("\nArtifacts created:")
    logger.info(template.format(artifact_manager.content_packs_path))
    if not artifact_manager.only_content_packs:
        for artifact_dir in (artifact_manager.content_test_path,
                             artifact_manager.content_new_path,
                             artifact_manager.content_all_path):
            logger.info(template.format(artifact_dir))
    if zipped:
        logger.info(f'\n\t - {artifact_manager.content_uploadable_zips_path}')
def sign_packs(artifact_manager: ArtifactsManager):
    """Sign the dumped pack directories with the configured signing tool.

    Requires both ``signDirectory`` and ``signature_key``; if only one of the two
    is supplied, an error is logged and nothing is signed.

    Args:
        artifact_manager: Artifacts manager holding the signing configuration.
    """
    global logger
    if artifact_manager.signDirectory and artifact_manager.signature_key:
        with ProcessPoolHandler(artifact_manager) as pool:
            # The external signer reads the key from a file on disk.
            with open('keyfile', 'wb') as keyfile:
                keyfile.write(artifact_manager.signature_key.encode())
            # Resolve the pack selection first so a single scheduling loop serves
            # both the 'all' case and an explicit pack-name list (previously the
            # same scheduling code was duplicated in each branch).
            if 'all' in artifact_manager.pack_names:
                packs_to_sign = list(artifact_manager.packs.values())
            else:
                packs_to_sign = [artifact_manager.packs[name]
                                 for name in artifact_manager.pack_names
                                 if name in artifact_manager.packs]
            futures: List[ProcessFuture] = []
            for pack in packs_to_sign:
                dumped_pack_dir = os.path.join(artifact_manager.content_packs_path, pack.id)
                futures.append(pool.schedule(pack.sign_pack, args=(logger, dumped_pack_dir,
                                                                   artifact_manager.signDirectory,
                                                                   )))
            wait_futures_complete(futures, artifact_manager)
    elif artifact_manager.signDirectory or artifact_manager.signature_key:
        logger.error('Failed to sign packs. In order to do so, you need to provide both signature_key and '
                     'sign_directory arguments.')
| 43.822264 | 120 | 0.668104 |
import logging
import os
import re
import sys
import time
from concurrent.futures import as_completed
from contextlib import contextmanager
from shutil import make_archive, rmtree
from typing import Callable, Dict, List, Optional, Union
from packaging.version import parse
from pebble import ProcessFuture, ProcessPool
from wcmatch.pathlib import BRACE, EXTMATCH, NEGATE, NODIR, SPLIT, Path
from demisto_sdk.commands.common.constants import (
BASE_PACK, CLASSIFIERS_DIR, CONTENT_ITEMS_DISPLAY_FOLDERS, DASHBOARDS_DIR,
DOCUMENTATION_DIR, GENERIC_DEFINITIONS_DIR, GENERIC_FIELDS_DIR,
GENERIC_MODULES_DIR, GENERIC_TYPES_DIR, INCIDENT_FIELDS_DIR,
INCIDENT_TYPES_DIR, INDICATOR_FIELDS_DIR, INDICATOR_TYPES_DIR,
INTEGRATIONS_DIR, LAYOUTS_DIR, PACKS_DIR, PLAYBOOKS_DIR,
PRE_PROCESS_RULES_DIR, RELEASE_NOTES_DIR, REPORTS_DIR, SCRIPTS_DIR,
TEST_PLAYBOOKS_DIR, TOOLS_DIR, WIDGETS_DIR, ContentItems)
from demisto_sdk.commands.common.content import (Content, ContentError,
ContentFactoryError, Pack)
from demisto_sdk.commands.common.content.objects.pack_objects import (
JSONContentObject, Script, TextObject, YAMLContentObject,
YAMLContentUnifiedObject)
from demisto_sdk.commands.common.tools import arg_to_list
from .artifacts_report import ArtifactsReport, ObjectReport
one, remove_test_playbooks: bool = True):
self.artifacts_path = Path(artifacts_path)
self.zip_artifacts = zip
self.only_content_packs = packs
self.content_version = content_version
self.suffix = suffix
self.cpus = cpus
self.id_set_path = id_set_path
self.pack_names = arg_to_list(pack_names)
self.signature_key = signature_key
self.signDirectory = sign_directory
self.remove_test_playbooks = remove_test_playbooks
self.content_new_path = self.artifacts_path / 'content_new'
self.content_test_path = self.artifacts_path / 'content_test'
self.content_packs_path = self.artifacts_path / 'content_packs'
self.content_all_path = self.artifacts_path / 'all_content'
self.content_uploadable_zips_path = self.artifacts_path / 'uploadable_packs'
self.content = Content.from_cwd()
self.execution_start = time.time()
self.packs = self.content.packs
self.exit_code = EX_SUCCESS
    def create_content_artifacts(self) -> int:
        """Create all content artifacts (dirs/zips) and return the process exit code."""
        with ArtifactsDirsHandler(self), ProcessPoolHandler(self) as pool:
            futures: List[ProcessFuture] = []
            futures.extend(dump_packs(self, pool))
            if not self.remove_test_playbooks:
                futures.append(pool.schedule(dump_tests_conditionally, args=(self,)))
            futures.append(pool.schedule(dump_content_descriptor, args=(self,)))
            futures.append(pool.schedule(dump_content_documentations, args=(self,)))
            wait_futures_complete(futures, self)
            suffix_handler(self)
        # Remove the temporary signing key file, if one was written during the run.
        if os.path.exists('keyfile'):
            os.remove('keyfile')
        logger.info(f"\nExecution time: {time.time() - self.execution_start} seconds")
        return self.exit_code
    def get_relative_pack_path(self, content_object: ContentObject):
        """Return the object's path relative to the repository's Packs directory."""
        return content_object.path.relative_to(self.content.path / PACKS_DIR)
    def get_base_path(self) -> Path:
        """Return the content repository root path."""
        return self.content.path
    def get_dir_to_delete(self):
        """Return every artifacts directory that should be wiped before/after a run."""
        return [self.content_test_path, self.content_new_path, self.content_packs_path, self.content_all_path]
class ContentItemsHandler:
    """Collects per-pack content-item display entries for the pack metadata.

    Each handled object is summarized into ``content_items`` (keyed by item kind)
    and contributes to ``server_min_version``, the lowest server version the pack
    requires.
    """
    def __init__(self):
        # Lowest server version any handled content item demands.
        self.server_min_version = parse('1.0.0')
        # Accumulated display entries, keyed by content item kind.
        self.content_items: Dict[ContentItems, List] = {
            ContentItems.SCRIPTS: [],
            ContentItems.PLAYBOOKS: [],
            ContentItems.INTEGRATIONS: [],
            ContentItems.INCIDENT_FIELDS: [],
            ContentItems.INCIDENT_TYPES: [],
            ContentItems.DASHBOARDS: [],
            ContentItems.INDICATOR_FIELDS: [],
            ContentItems.REPORTS: [],
            ContentItems.INDICATOR_TYPES: [],
            ContentItems.LAYOUTS: [],
            ContentItems.PRE_PROCESS_RULES: [],
            ContentItems.CLASSIFIERS: [],
            ContentItems.WIDGETS: [],
            ContentItems.GENERIC_FIELDS: [],
            ContentItems.GENERIC_TYPES: [],
            ContentItems.GENERIC_MODULES: [],
            ContentItems.GENERIC_DEFINITIONS: []
        }
        # Maps a pack sub-directory name to the handler recording its items.
        self.content_folder_name_to_func: Dict[str, Callable] = {
            SCRIPTS_DIR: self.add_script_as_content_item,
            PLAYBOOKS_DIR: self.add_playbook_as_content_item,
            INTEGRATIONS_DIR: self.add_integration_as_content_item,
            INCIDENT_FIELDS_DIR: self.add_incident_field_as_content_item,
            INCIDENT_TYPES_DIR: self.add_incident_type_as_content_item,
            DASHBOARDS_DIR: self.add_dashboard_as_content_item,
            INDICATOR_FIELDS_DIR: self.add_indicator_field_as_content_item,
            INDICATOR_TYPES_DIR: self.add_indicator_type_as_content_item,
            REPORTS_DIR: self.add_report_as_content_item,
            LAYOUTS_DIR: self.add_layout_as_content_item,
            PRE_PROCESS_RULES_DIR: self.add_pre_process_rules_as_content_item,
            CLASSIFIERS_DIR: self.add_classifier_as_content_item,
            WIDGETS_DIR: self.add_widget_as_content_item,
            GENERIC_TYPES_DIR: self.add_generic_type_as_content_item,
            GENERIC_FIELDS_DIR: self.add_generic_field_as_content_item,
            GENERIC_MODULES_DIR: self.add_generic_module_as_content_item,
            GENERIC_DEFINITIONS_DIR: self.add_generic_definition_as_content_item
        }
    def handle_content_item(self, content_object: ContentObject):
        """Dispatch *content_object* to the matching add_* handler, if displayable."""
        content_object_directory = content_object.path.parts[-3]
        # Flat layouts keep the item directly under its kind directory.
        if content_object_directory not in self.content_folder_name_to_func.keys():
            content_object_directory = content_object.path.parts[-2]
        if content_object.to_version < FIRST_MARKETPLACE_VERSION:
            return
        # BUGFIX: re.match takes (pattern, string) — the arguments were swapped,
        # so non-reputation indicator-type files were never filtered correctly.
        if content_object_directory == INDICATOR_TYPES_DIR and not re.match('reputation-.*.json',
                                                                            content_object.path.name):
            return
        # skip content items that are not displayed in contentItems
        if content_object_directory not in CONTENT_ITEMS_DISPLAY_FOLDERS:
            return
        self.server_min_version = max(self.server_min_version, content_object.from_version)
        self.content_folder_name_to_func[content_object_directory](content_object)
    def add_script_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.SCRIPTS].append({
            'name': content_object.get('name', ''),
            'description': content_object.get('comment', ''),
            'tags': content_object.get('tags', [])
        })
    def add_playbook_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.PLAYBOOKS].append({
            'name': content_object.get('name', ''),
            'description': content_object.get('description', ''),
        })
    def add_integration_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.INTEGRATIONS].append({
            'name': content_object.get('display', ""),
            'description': content_object.get('description', ''),
            'category': content_object.get('category', ''),
            'commands': [
                {
                    'name': command.get('name', ''),
                    'description': command.get('description', '')
                }
                for command in content_object.script.get('commands', [])]
        })
    def add_incident_field_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.INCIDENT_FIELDS].append({
            'name': content_object.get('name', ''),
            'type': content_object.get('type', ''),
            'description': content_object.get('description', '')
        })
    def add_incident_type_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.INCIDENT_TYPES].append({
            'name': content_object.get('name', ''),
            'playbook': content_object.get('playbookId', ''),
            'closureScript': content_object.get('closureScript', ''),
            'hours': int(content_object.get('hours', 0)),
            'days': int(content_object.get('days', 0)),
            'weeks': int(content_object.get('weeks', 0))
        })
    def add_dashboard_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.DASHBOARDS].append({
            'name': content_object.get('name', '')
        })
    def add_indicator_field_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.INDICATOR_FIELDS].append({
            'name': content_object.get('name', ''),
            'type': content_object.get('type', ''),
            'description': content_object.get('description', '')
        })
    def add_indicator_type_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.INDICATOR_TYPES].append({
            'details': content_object.get('details', ''),
            'reputationScriptName': content_object.get('reputationScriptName', ''),
            'enhancementScriptNames': content_object.get('enhancementScriptNames', [])
        })
    def add_report_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.REPORTS].append({
            'name': content_object.get('name', ''),
            'description': content_object.get('description', '')
        })
    def add_layout_as_content_item(self, content_object: ContentObject):
        # Only include 'description' when it is present in the layout file.
        if content_object.get('description') is not None:
            self.content_items[ContentItems.LAYOUTS].append({
                'name': content_object.get('name', ''),
                'description': content_object.get('description')
            })
        else:
            self.content_items[ContentItems.LAYOUTS].append({
                'name': content_object.get('name', '')
            })
    def add_pre_process_rules_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.PRE_PROCESS_RULES].append({
            'name': content_object.get('name') or content_object.get('id', ''),
            'description': content_object.get('description', ''),
        })
    def add_classifier_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.CLASSIFIERS].append({
            'name': content_object.get('name') or content_object.get('id', ''),
            'description': content_object.get('description', '')
        })
    def add_widget_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.WIDGETS].append({
            'name': content_object.get('name', ''),
            'dataType': content_object.get('dataType', ''),
            'widgetType': content_object.get('widgetType', '')
        })
    def add_generic_field_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.GENERIC_FIELDS].append({
            'name': content_object.get('name', ''),
            'type': content_object.get('type', ''),
            'description': content_object.get('description', '')
        })
    def add_generic_type_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.GENERIC_TYPES].append({
            'name': content_object.get('name', ''),
            'details': content_object.get('details', ''),
        })
    def add_generic_definition_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.GENERIC_DEFINITIONS].append({
            'name': content_object.get('name', ''),
            'description': content_object.get('description', '')
        })
    def add_generic_module_as_content_item(self, content_object: ContentObject):
        self.content_items[ContentItems.GENERIC_MODULES].append({
            'name': content_object.get('name', ''),
            'description': content_object.get('description', '')
        })
@contextmanager
def ProcessPoolHandler(artifact_manager: ArtifactsManager) -> ProcessPool:
    """Yield a pebble ProcessPool, tearing it down cleanly on error or interrupt.

    Workers are started with child_mute so their stdout does not clutter the log.
    """
    global logger
    with ProcessPool(max_workers=artifact_manager.cpus, initializer=child_mute) as pool:
        try:
            yield pool
        except KeyboardInterrupt:
            # stop() cancels pending tasks; join() waits for running ones to exit.
            logger.info("\nCTRL+C Pressed!\nGracefully release all resources due to keyboard interrupt...")
            pool.stop()
            pool.join()
            raise
        except Exception as e:
            logger.exception(e)
            logger.error("Gracefully release all resources due to Error...")
            pool.stop()
            pool.join()
            raise
        else:
            # Normal completion: let queued tasks finish before joining.
            pool.close()
            pool.join()
        finally:
            # Remove the temporary signing key file, if one was written.
            if os.path.exists('keyfile'):
                os.remove('keyfile')
def wait_futures_complete(futures: List[ProcessFuture], artifact_manager: ArtifactsManager):
    """Block until all futures finish, logging their reports and re-raising failures."""
    global logger
    for future in as_completed(futures):
        try:
            result = future.result()
            if isinstance(result, ArtifactsReport):
                logger.info(result.to_str(artifact_manager.get_base_path()))
        except (ContentError, DuplicateFiles, ContentFactoryError) as e:
            # Known content errors carry a user-facing message.
            logger.error(e.msg)
            raise
        except Exception as e:
            logger.exception(e)
            raise
#####################################################
# Files include rules functions (Version, Type etc) #
#####################################################
def is_in_content_packs(content_object: ContentObject) -> bool:
    """True if the object is still relevant for marketplace content packs."""
    return content_object.to_version >= FIRST_MARKETPLACE_VERSION
def is_in_content_test(artifact_manager: ArtifactsManager, content_object: ContentObject) -> bool:
    """True if the object belongs in content_test (a non-ignored, pre-marketplace test playbook)."""
    return (not artifact_manager.only_content_packs and
            TEST_PLAYBOOKS_DIR in content_object.path.parts and
            content_object.from_version < FIRST_MARKETPLACE_VERSION and
            IGNORED_TEST_PLAYBOOKS_DIR not in content_object.path.parts)
def is_in_content_new(artifact_manager: ArtifactsManager, content_object: ContentObject) -> bool:
    """True if the object belongs in content_new (non-test, pre-marketplace)."""
    return (not artifact_manager.only_content_packs and
            TEST_PLAYBOOKS_DIR not in content_object.path.parts and
            content_object.from_version < FIRST_MARKETPLACE_VERSION)
def is_in_content_all(artifact_manager: ArtifactsManager, content_object: ContentObject) -> bool:
    """True if the object belongs in all_content (i.e. in content_new or content_test)."""
    return is_in_content_new(artifact_manager, content_object) or is_in_content_test(artifact_manager, content_object)
############################
# Documentations functions #
############################
def dump_content_documentations(artifact_manager: ArtifactsManager) -> ArtifactsReport:
    """Dump documentation files into the Base pack; link into new/all unless packs-only."""
    report = ArtifactsReport("Documentations:")
    for documentation in artifact_manager.content.documentations:
        object_report = ObjectReport(documentation, content_packs=True)
        created_files = documentation.dump(artifact_manager.content_packs_path / BASE_PACK / DOCUMENTATION_DIR)
        if not artifact_manager.only_content_packs:
            object_report.set_content_new()
            object_report.set_content_all()
            for dest in [artifact_manager.content_new_path,
                         artifact_manager.content_all_path]:
                # Hard-link the already-dumped files instead of re-dumping.
                created_files = dump_link_files(artifact_manager, documentation, dest, created_files)
        report.append(object_report)
    return report
########################
# Descriptor functions #
########################
def dump_content_descriptor(artifact_manager: ArtifactsManager) -> ArtifactsReport:
    """Link the content descriptor into test/new/all directories (skipped in packs-only runs)."""
    report = ArtifactsReport("Content descriptor:")
    if not artifact_manager.only_content_packs and artifact_manager.content.content_descriptor:
        descriptor = artifact_manager.content.content_descriptor
        object_report = ObjectReport(descriptor, content_test=True, content_new=True, content_all=True)
        created_files: List[Path] = []
        for dest in [artifact_manager.content_test_path,
                     artifact_manager.content_new_path,
                     artifact_manager.content_all_path]:
            # First iteration dumps; later ones hard-link the dumped files.
            created_files = dump_link_files(artifact_manager, descriptor, dest, created_files)
        report.append(object_report)
    return report
##################################
# Content Testplaybook functions #
##################################
def dump_tests_conditionally(artifact_manager: ArtifactsManager) -> ArtifactsReport:
    """Dump eligible test playbooks into content_test and link them into all_content."""
    report = ArtifactsReport("TestPlaybooks:")
    for test in artifact_manager.content.test_playbooks:
        object_report = ObjectReport(test)
        if is_in_content_test(artifact_manager, test):
            object_report.set_content_test()
            test_created_files = dump_link_files(artifact_manager, test, artifact_manager.content_test_path)
            dump_link_files(artifact_manager, test, artifact_manager.content_all_path, test_created_files)
        report += object_report
    return report
###########################
# Content packs functions #
###########################
def dump_packs(artifact_manager: ArtifactsManager, pool: ProcessPool) -> List[ProcessFuture]:
    """Schedule dump_pack for every requested (non-ignored) pack and return the futures."""
    futures = []
    if 'all' in artifact_manager.pack_names:
        for pack_name, pack in artifact_manager.packs.items():
            if pack_name not in IGNORED_PACKS:
                futures.append(pool.schedule(dump_pack, args=(artifact_manager, pack)))
    else:
        # Explicit pack list: silently skip names that do not exist in the repo.
        for pack_name in artifact_manager.pack_names:
            if pack_name not in IGNORED_PACKS and pack_name in artifact_manager.packs:
                futures.append(pool.schedule(dump_pack,
                                             args=(artifact_manager, artifact_manager.packs[pack_name])
                                             ))
    return futures
def dump_pack(artifact_manager: ArtifactsManager, pack: Pack) -> ArtifactsReport:  # noqa: C901
    """Dump a single pack's content into the artifacts directories.

    Iterates every content-item kind in the pack, records each item in the pack's
    metadata content items, and returns an aggregated ArtifactsReport.
    """
    global logger
    pack_report = ArtifactsReport(f"Pack {pack.id}:")
    pack.metadata.load_user_metadata(pack.id, pack.path.name, pack.path, logger)
    content_items_handler = ContentItemsHandler()
    is_feed_pack = False
    for integration in pack.integrations:
        content_items_handler.handle_content_item(integration)
        is_feed_pack = is_feed_pack or integration.is_feed
        pack_report += dump_pack_conditionally(artifact_manager, integration)
    for script in pack.scripts:
        content_items_handler.handle_content_item(script)
        pack_report += dump_pack_conditionally(artifact_manager, script)
    for playbook in pack.playbooks:
        content_items_handler.handle_content_item(playbook)
        # Playbooks named 'TIM ...' indicate a threat-intel (feed) pack.
        is_feed_pack = is_feed_pack or playbook.get('name', '').startswith('TIM')
        pack_report += dump_pack_conditionally(artifact_manager, playbook)
    for test_playbook in pack.test_playbooks:
        pack_report += dump_pack_conditionally(artifact_manager, test_playbook)
    for report in pack.reports:
        content_items_handler.handle_content_item(report)
        pack_report += dump_pack_conditionally(artifact_manager, report)
    for layout in pack.layouts:
        content_items_handler.handle_content_item(layout)
        pack_report += dump_pack_conditionally(artifact_manager, layout)
    for pre_process_rule in pack.pre_process_rules:
        content_items_handler.handle_content_item(pre_process_rule)
        pack_report += dump_pack_conditionally(artifact_manager, pre_process_rule)
    for dashboard in pack.dashboards:
        content_items_handler.handle_content_item(dashboard)
        pack_report += dump_pack_conditionally(artifact_manager, dashboard)
    for incident_field in pack.incident_fields:
        content_items_handler.handle_content_item(incident_field)
        pack_report += dump_pack_conditionally(artifact_manager, incident_field)
    for incident_type in pack.incident_types:
        content_items_handler.handle_content_item(incident_type)
        pack_report += dump_pack_conditionally(artifact_manager, incident_type)
    for indicator_field in pack.indicator_fields:
        content_items_handler.handle_content_item(indicator_field)
        pack_report += dump_pack_conditionally(artifact_manager, indicator_field)
    for indicator_type in pack.indicator_types:
        content_items_handler.handle_content_item(indicator_type)
        pack_report += dump_pack_conditionally(artifact_manager, indicator_type)
    # Connections are dumped but not listed in the metadata content items.
    for connection in pack.connections:
        pack_report += dump_pack_conditionally(artifact_manager, connection)
    for classifier in pack.classifiers:
        content_items_handler.handle_content_item(classifier)
        pack_report += dump_pack_conditionally(artifact_manager, classifier)
    for widget in pack.widgets:
        content_items_handler.handle_content_item(widget)
        pack_report += dump_pack_conditionally(artifact_manager, widget)
    for generic_definition in pack.generic_definitions:
        content_items_handler.handle_content_item(generic_definition)
        pack_report += dump_pack_conditionally(artifact_manager, generic_definition)
    for generic_module in pack.generic_modules:
        content_items_handler.handle_content_item(generic_module)
        pack_report += dump_pack_conditionally(artifact_manager, generic_module)
    for generic_type in pack.generic_types:
        content_items_handler.handle_content_item(generic_type)
        pack_report += dump_pack_conditionally(artifact_manager, generic_type)
    for generic_field in pack.generic_fields:
        content_items_handler.handle_content_item(generic_field)
        pack_report += dump_pack_conditionally(artifact_manager, generic_field)
    for release_note in pack.release_notes:
        pack_report += ObjectReport(release_note, content_packs=True)
        release_note.dump(artifact_manager.content_packs_path / pack.id / RELEASE_NOTES_DIR)
    for release_note_config in pack.release_notes_config:
        pack_report += ObjectReport(release_note_config, content_packs=True)
        release_note_config.dump(artifact_manager.content_packs_path / pack.id / RELEASE_NOTES_DIR)
    for tool in pack.tools:
        object_report = ObjectReport(tool, content_packs=True)
        created_files = tool.dump(artifact_manager.content_packs_path / pack.id / TOOLS_DIR)
        if not artifact_manager.only_content_packs:
            object_report.set_content_new()
            dump_link_files(artifact_manager, tool, artifact_manager.content_new_path, created_files)
            object_report.set_content_all()
            dump_link_files(artifact_manager, tool, artifact_manager.content_all_path, created_files)
        pack_report += object_report
    if pack.pack_metadata:
        pack_report += ObjectReport(pack.pack_metadata, content_packs=True)
        pack.pack_metadata.dump(artifact_manager.content_packs_path / pack.id)
    if pack.metadata:
        pack_report += ObjectReport(pack.metadata, content_packs=True)
        pack.metadata.content_items = content_items_handler.content_items
        pack.metadata.server_min_version = pack.metadata.server_min_version or content_items_handler.server_min_version
        if artifact_manager.id_set_path:
            # Dependencies can only be done when id_set file is given.
            pack.metadata.handle_dependencies(pack.path.name, artifact_manager.id_set_path, logger)
        else:
            logger.warning('Skipping dependencies extraction since no id_set file was provided.')
        if is_feed_pack and 'TIM' not in pack.metadata.tags:
            pack.metadata.tags.append('TIM')
        pack.metadata.dump_metadata_file(artifact_manager.content_packs_path / pack.id)
    if pack.readme or pack.contributors:
        if not pack.readme:
            # Contributors without a README: create an empty one to dump into.
            readme_file = os.path.join(pack.path, 'README.md')
            open(readme_file, 'a+').close()
        readme_obj = pack.readme
        readme_obj.contributors = pack.contributors
        pack_report += ObjectReport(readme_obj, content_packs=True)
        readme_obj.dump(artifact_manager.content_packs_path / pack.id)
    if pack.author_image:
        pack_report += ObjectReport(pack.author_image, content_packs=True)
        pack.author_image.dump(artifact_manager.content_packs_path / pack.id)
    return pack_report
def dump_pack_conditionally(artifact_manager: ArtifactsManager, content_object: ContentObject) -> ObjectReport:
    """Dump one content object into every artifacts directory it qualifies for."""
    object_report = ObjectReport(content_object)
    pack_created_files: List[Path] = []
    test_new_created_files: List[Path] = []
    with content_files_handler(artifact_manager, content_object) as files_to_remove:
        # Content packs filter - When unify also _45.yml created which should be deleted after copy it if needed
        if is_in_content_packs(content_object):
            object_report.set_content_packs()
            # Unify will create *_45.yml files which shouldn't be in content_packs
            pack_created_files.extend(dump_link_files(artifact_manager, content_object,
                                                      artifact_manager.content_packs_path /
                                                      calc_relative_packs_dir(artifact_manager, content_object)))
            files_to_remove.extend(
                [created_file for created_file in pack_created_files if created_file.name.endswith('_45.yml')])
        if is_in_content_test(artifact_manager, content_object):
            object_report.set_content_test()
            test_new_created_files = dump_link_files(artifact_manager, content_object,
                                                     artifact_manager.content_test_path, pack_created_files)
        if is_in_content_new(artifact_manager, content_object):
            object_report.set_content_new()
            test_new_created_files = dump_link_files(artifact_manager, content_object,
                                                     artifact_manager.content_new_path, pack_created_files)
        if is_in_content_all(artifact_manager, content_object):
            object_report.set_content_all()
            dump_link_files(artifact_manager, content_object, artifact_manager.content_all_path, test_new_created_files)
    return object_report
@contextmanager
def content_files_handler(artifact_manager: ArtifactsManager, content_object: ContentObject):
    """Temporarily patch CommonServerPython constants and clean up queued files.

    Yields a list the caller may append Paths to; every listed file is unlinked
    on exit, and CommonServerPython's constants are restored to placeholders.
    """
    files_to_remove: List[Path] = []
    try:
        if (BASE_PACK in content_object.path.parts) and isinstance(content_object, Script) and \
                content_object.code_path and content_object.code_path.name == 'CommonServerPython.py':
            # Inject the real content version/branch before the file is dumped.
            repo = artifact_manager.content.git()
            modify_common_server_constants(content_object.code_path, artifact_manager.content_version,
                                           'master' if not repo else repo.active_branch)
        yield files_to_remove
    finally:
        if (BASE_PACK in content_object.path.parts) and isinstance(content_object, Script) and \
                content_object.code_path and content_object.code_path.name == 'CommonServerPython.py':
            # Restore placeholder values so the working tree stays clean.
            modify_common_server_constants(content_object.code_path, '0.0.0', 'master')
        for file_path in files_to_remove:
            file_path.unlink()
def modify_common_server_constants(code_path: Path, content_version: str, branch_name: Optional[str] = None):
    """Rewrite CONTENT_RELEASE_VERSION / CONTENT_BRANCH_NAME constants in place.

    Args:
        code_path: Path of the CommonServerPython.py file to patch.
        content_version: Version string to inject.
        branch_name: Branch name to inject (None is written literally as 'None').
    """
    contents = code_path.read_text()
    # Escape the dots and allow multi-digit components: the previous pattern
    # r"'\d.\d.\d'" only matched single-digit versions (e.g. '10.21.3' was
    # silently left unpatched) and '.' matched any character.
    contents = re.sub(r"CONTENT_RELEASE_VERSION = '\d+\.\d+\.\d+'",
                      f"CONTENT_RELEASE_VERSION = '{content_version}'",
                      contents)
    contents = re.sub(r"CONTENT_BRANCH_NAME = '\w+'",
                      f"CONTENT_BRANCH_NAME = '{branch_name}'",
                      contents)
    code_path.write_text(contents)
flags=BRACE | SPLIT | EXTMATCH | NODIR | NEGATE)
files_content_new = artifact_manager.content_new_path.rglob(files_pattern_to_add_suffix,
flags=BRACE | SPLIT | EXTMATCH | NODIR | NEGATE)
files_content_all = artifact_manager.content_all_path.rglob(files_pattern_to_add_suffix,
flags=BRACE | SPLIT | EXTMATCH | NODIR | NEGATE)
for files in [files_content_new, files_content_packs, files_content_test, files_content_all]:
for file in files:
file_name_split = file.name.split('.')
file_real_stem = ".".join(file_name_split[:-1])
suffix = file_name_split[-1]
file.rename(file.with_name(f'{file_real_stem}{artifact_manager.suffix}.{suffix}'))
ng_file = exiting_file
self.src = src
self.msg = f"\nFound duplicate files\n1. {src}\n2. {exiting_file}"
def dump_link_files(artifact_manager: ArtifactsManager, content_object: ContentObject,
                    dest_dir: Path, created_files: Optional[List[Path]] = None) -> List[Path]:
    """Dump *content_object* into *dest_dir*, hard-linking files already created this run.

    Raises:
        DuplicateFiles: if a same-named file was already produced during this execution
            (detected via mtime >= execution_start).
    """
    new_created_files = []
    if created_files:
        for file in created_files:
            new_file = dest_dir / file.name
            # A file modified after the run started means another object produced it.
            if new_file.exists() and new_file.stat().st_mtime >= artifact_manager.execution_start:
                raise DuplicateFiles(new_file, content_object.path)
            else:
                os.link(file, new_file)
                new_created_files.append(new_file)
    else:
        target = dest_dir / content_object.normalize_file_name()
        if target.exists() and target.stat().st_mtime >= artifact_manager.execution_start:
            raise DuplicateFiles(target, content_object.path)
        else:
            new_created_files.extend(content_object.dump(dest_dir=dest_dir))
    return new_created_files
def calc_relative_packs_dir(artifact_manager: ArtifactsManager, content_object: ContentObject) -> Path:
    """Compute the destination directory (relative to the Packs root) for a content object.

    Objects nested one level below Integrations/Scripts (unified layout) are
    flattened two levels up; everything else lands next to its parent directory.
    """
    relative_pack_path = artifact_manager.get_relative_pack_path(content_object)
    if ((INTEGRATIONS_DIR in relative_pack_path.parts and relative_pack_path.parts[-2] != INTEGRATIONS_DIR) or
            (SCRIPTS_DIR in relative_pack_path.parts and relative_pack_path.parts[-2] != SCRIPTS_DIR)):
        relative_pack_path = relative_pack_path.parent.parent
    else:
        relative_pack_path = relative_pack_path.parent
    return relative_pack_path
def child_mute():
    """Mute a child process by redirecting its stdout to the null device."""
    sys.stdout = open(os.devnull, 'w')
(artifact_manager: ArtifactsManager):
if artifact_manager.only_content_packs:
make_archive(artifact_manager.content_packs_path, 'zip', artifact_manager.content_packs_path)
else:
with ProcessPoolHandler(artifact_manager) as pool:
for artifact_dir in [artifact_manager.content_test_path, artifact_manager.content_new_path,
artifact_manager.content_packs_path, artifact_manager.content_all_path]:
pool.schedule(make_archive, args=(artifact_dir, 'zip', artifact_dir))
def zip_packs(artifact_manager: ArtifactsManager):
    """Create an uploadable <pack id>.zip for every dumped pack, in parallel."""
    with ProcessPoolHandler(artifact_manager) as pool:
        for pack_name, pack in artifact_manager.packs.items():
            dumped_pack_dir = os.path.join(artifact_manager.content_packs_path, pack.id)
            zip_path = os.path.join(artifact_manager.content_uploadable_zips_path, pack.id)
            pool.schedule(make_archive, args=(zip_path, 'zip', dumped_pack_dir))
def report_artifacts_paths(artifact_manager: ArtifactsManager):
    """Log the destinations of all artifacts created by this run."""
    global logger
    logger.info("\nArtifacts created:")
    # When zipping, the raw directories were replaced by <dir>.zip files.
    if artifact_manager.zip_artifacts:
        template = "\n\t - {}.zip"
    else:
        template = "\n\t - {}"
    logger.info(template.format(artifact_manager.content_packs_path))
    if not artifact_manager.only_content_packs:
        for artifact_dir in [artifact_manager.content_test_path, artifact_manager.content_new_path,
                             artifact_manager.content_all_path]:
            logger.info(template.format(artifact_dir))
    if artifact_manager.zip_artifacts:
        logger.info(f'\n\t - {artifact_manager.content_uploadable_zips_path}')
def sign_packs(artifact_manager: ArtifactsManager):
    """Sign dumped pack directories; requires both signDirectory and signature_key.

    If only one of the two is supplied, an error is logged and nothing is signed.
    """
    global logger
    if artifact_manager.signDirectory and artifact_manager.signature_key:
        with ProcessPoolHandler(artifact_manager) as pool:
            # The external signer reads the key from a file on disk.
            with open('keyfile', 'wb') as keyfile:
                keyfile.write(artifact_manager.signature_key.encode())
            futures: List[ProcessFuture] = []
            # NOTE(review): the two branches below schedule identical work and
            # could be collapsed by resolving the pack list first.
            if 'all' in artifact_manager.pack_names:
                for pack_name, pack in artifact_manager.packs.items():
                    dumped_pack_dir = os.path.join(artifact_manager.content_packs_path, pack.id)
                    futures.append(pool.schedule(pack.sign_pack, args=(logger, dumped_pack_dir,
                                                                       artifact_manager.signDirectory,
                                                                       )))
            else:
                for pack_name in artifact_manager.pack_names:
                    if pack_name in artifact_manager.packs:
                        pack = artifact_manager.packs[pack_name]
                        dumped_pack_dir = os.path.join(artifact_manager.content_packs_path, pack.id)
                        futures.append(pool.schedule(pack.sign_pack, args=(logger, dumped_pack_dir,
                                                                           artifact_manager.signDirectory,
                                                                           )))
            wait_futures_complete(futures, artifact_manager)
    elif artifact_manager.signDirectory or artifact_manager.signature_key:
        logger.error('Failed to sign packs. In order to do so, you need to provide both signature_key and '
                     'sign_directory arguments.')
| true | true |
f73b330a985a8bdc79189580261efb90f9641523 | 5,932 | py | Python | GeneratorInterface/TauolaInterface/test/TauSpinnerZTest.py | m-sedghi/cmssw | 859df8affee372c53be79cdd2d8a5ff001eae841 | [
"Apache-2.0"
] | 1 | 2020-05-27T10:52:33.000Z | 2020-05-27T10:52:33.000Z | GeneratorInterface/TauolaInterface/test/TauSpinnerZTest.py | m-sedghi/cmssw | 859df8affee372c53be79cdd2d8a5ff001eae841 | [
"Apache-2.0"
] | 28 | 2019-08-15T15:21:11.000Z | 2021-12-29T14:13:18.000Z | GeneratorInterface/TauolaInterface/test/TauSpinnerZTest.py | m-sedghi/cmssw | 859df8affee372c53be79cdd2d8a5ff001eae841 | [
"Apache-2.0"
] | 1 | 2020-08-18T10:29:49.000Z | 2020-08-18T10:29:49.000Z | import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
#process.load("SimGeneral.HepPDTESSource.pdt_cfi")
process.load("GeneratorInterface.TauolaInterface.TauSpinner_cfi")
# Deterministic seeds for the Pythia6 generator and the TauSpinner
# afterburner (same seed, independent HepJamesRandom engines).
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
                                                   generator = cms.PSet(initialSeed = cms.untracked.uint32(123456789),
                                                                        engineName = cms.untracked.string('HepJamesRandom')
                                                   ),
                                                   TauSpinnerGen = cms.PSet(initialSeed = cms.untracked.uint32(123456789),
                                                                            engineName = cms.untracked.string('HepJamesRandom')
                                                   )
)
process.randomEngineStateProducer = cms.EDProducer("RandomEngineStateProducer")
process.Timing=cms.Service("Timing",
                           summaryOnly=cms.untracked.bool(True))
# The following three lines reduce the clutter of repeated printouts
# of the same exception message.
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.destinations = ['cerr']
process.MessageLogger.statistics = []
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(100))
process.source = cms.Source("EmptySource")
from Configuration.Generator.PythiaUESettings_cfi import *
# Pythia6 Z/gamma* production at 7 TeV; tau decays delegated to Tauola.
process.generator = cms.EDFilter("Pythia6GeneratorFilter",
    pythiaHepMCVerbosity = cms.untracked.bool(True),
    maxEventsToPrint = cms.untracked.int32(0),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    # this shows how to turn ON some of the general Py6 printouts, like banner...
    ## --> displayPythiaBanner = cms.untracked.bool(True),
    ## --> displayPythiaCards = cms.untracked.bool(True),
    comEnergy = cms.double(7000.0),
    ExternalDecays = cms.PSet(
        Tauola = cms.untracked.PSet(
            # these settings below exemplfy how to use "native" Tauola approach:
            # one MUST set mdtau=1 !!! then pjak1 & pjak2 will translate into
            # native variables jak1 & jak2 (jak1/jak2=4 means that both tau's
            # decay into the rho-mode
            #
            UseTauolaPolarization = cms.bool(True),
            InputCards = cms.PSet
            (
                pjak1 = cms.int32(0),
                pjak2 = cms.int32(0),
                mdtau = cms.int32(0) # generic tau decays
                ## mdtau = cms.int32(240) # (any) tau -> nu pi+-
            )
            # TauolaDefaultInputCards,
            # TauolaPolar
        ),
        parameterSets = cms.vstring('Tauola')
    ),
    PythiaParameters = cms.PSet(
        pythiaUESettingsBlock,
        # Force Z -> tau+ tau- by switching off every other Z decay channel.
        processParameters = cms.vstring('MSEL=0 !User defined processes',
            'MSUB(1)=1 !Incl Z0/gamma* production',
            'MSTP(43)=3 !Both Z0 and gamma*',
            'MDME(174,1)=0 !Z decay into d dbar',
            'MDME(175,1)=0 !Z decay into u ubar',
            'MDME(176,1)=0 !Z decay into s sbar',
            'MDME(177,1)=0 !Z decay into c cbar',
            'MDME(178,1)=0 !Z decay into b bbar',
            'MDME(179,1)=0 !Z decay into t tbar',
            'MDME(182,1)=0 !Z decay into e- e+',
            'MDME(183,1)=0 !Z decay into nu_e nu_ebar',
            'MDME(184,1)=0 !Z decay into mu- mu+',
            'MDME(185,1)=0 !Z decay into nu_mu nu_mubar',
            'MDME(186,1)=1 !Z decay into tau- tau+',
            'MDME(187,1)=0 !Z decay into nu_tau nu_taubar',
            'CKIN(1)=50. !Minimum sqrt(s_hat) value (=Z mass)'),
        # This is a vector of ParameterSet names to be read, in this order
        parameterSets = cms.vstring('pythiaUESettings',
            'processParameters')
    )
)
# Produce PDF weights (maximum is 3)
process.pdfWeights = cms.EDProducer("PdfWeightProducer",
    # Fix POWHEG if buggy (this PDF set will also appear on output,
    # so only two more PDF sets can be added in PdfSetNames if not "")
    #FixPOWHEG = cms.untracked.string("cteq66.LHgrid"),
    #GenTag = cms.untracked.InputTag("genParticles"),
    PdfInfoTag = cms.untracked.InputTag("VtxSmeared"),
    PdfSetNames = cms.untracked.vstring(
        # "cteq66.LHgrid"
        # , "MRST2006nnlo.LHgrid" ,
        "MSTW2008nnlo90cl.LHgrid"
    )
)
# NOTE(review): this 'p1' path is dead -- it is unconditionally re-assigned
# below to the version that also runs randomEngineStateProducer.
process.p1 = cms.Path( process.TauSpinnerGen )
process.GEN = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('Test_Py6_Z2TauTau_Tauola.root')
)
process.p = cms.Path(process.generator)
process.outpath = cms.EndPath(process.GEN)
process.p1 = cms.Path(process.randomEngineStateProducer*process.TauSpinnerGen)
# Execution order: generation, then the TauSpinner afterburner, then output.
process.schedule = cms.Schedule(process.p, process.p1, process.outpath)
| 51.137931 | 128 | 0.506237 | import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("GeneratorInterface.TauolaInterface.TauSpinner_cfi")
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
generator = cms.PSet(initialSeed = cms.untracked.uint32(123456789),
engineName = cms.untracked.string('HepJamesRandom')
),
TauSpinnerGen = cms.PSet(initialSeed = cms.untracked.uint32(123456789),
engineName = cms.untracked.string('HepJamesRandom')
)
)
process.randomEngineStateProducer = cms.EDProducer("RandomEngineStateProducer")
process.Timing=cms.Service("Timing",
summaryOnly=cms.untracked.bool(True))
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.destinations = ['cerr']
process.MessageLogger.statistics = []
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(100))
process.source = cms.Source("EmptySource")
from Configuration.Generator.PythiaUESettings_cfi import *
process.generator = cms.EDFilter("Pythia6GeneratorFilter",
pythiaHepMCVerbosity = cms.untracked.bool(True),
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(1),
ExternalDecays = cms.PSet(
Tauola = cms.untracked.PSet(
# decay into the rho-mode
#
UseTauolaPolarization = cms.bool(True),
InputCards = cms.PSet
(
pjak1 = cms.int32(0),
pjak2 = cms.int32(0),
mdtau = cms.int32(0) # generic tau decays
## mdtau = cms.int32(240) # (any) tau -> nu pi+-
)
# TauolaDefaultInputCards,
# TauolaPolar
),
parameterSets = cms.vstring('Tauola')
),
PythiaParameters = cms.PSet(
pythiaUESettingsBlock,
processParameters = cms.vstring('MSEL=0 !User defined processes',
'MSUB(1)=1 !Incl Z0/gamma* production',
'MSTP(43)=3 !Both Z0 and gamma*',
'MDME(174,1)=0 !Z decay into d dbar',
'MDME(175,1)=0 !Z decay into u ubar',
'MDME(176,1)=0 !Z decay into s sbar',
'MDME(177,1)=0 !Z decay into c cbar',
'MDME(178,1)=0 !Z decay into b bbar',
'MDME(179,1)=0 !Z decay into t tbar',
'MDME(182,1)=0 !Z decay into e- e+',
'MDME(183,1)=0 !Z decay into nu_e nu_ebar',
'MDME(184,1)=0 !Z decay into mu- mu+',
'MDME(185,1)=0 !Z decay into nu_mu nu_mubar',
'MDME(186,1)=1 !Z decay into tau- tau+',
'MDME(187,1)=0 !Z decay into nu_tau nu_taubar',
'CKIN(1)=50. !Minimum sqrt(s_hat) value (=Z mass)'),
# This is a vector of ParameterSet names to be read, in this order
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
)
)
# Produce PDF weights (maximum is 3)
process.pdfWeights = cms.EDProducer("PdfWeightProducer",
# Fix POWHEG if buggy (this PDF set will also appear on output,
# so only two more PDF sets can be added in PdfSetNames if not "")
#FixPOWHEG = cms.untracked.string("cteq66.LHgrid"),
#GenTag = cms.untracked.InputTag("genParticles"),
PdfInfoTag = cms.untracked.InputTag("VtxSmeared"),
PdfSetNames = cms.untracked.vstring(
# "cteq66.LHgrid"
# , "MRST2006nnlo.LHgrid" ,
"MSTW2008nnlo90cl.LHgrid"
)
)
process.p1 = cms.Path( process.TauSpinnerGen )
process.GEN = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('Test_Py6_Z2TauTau_Tauola.root')
)
process.p = cms.Path(process.generator)
process.outpath = cms.EndPath(process.GEN)
process.p1 = cms.Path(process.randomEngineStateProducer*process.TauSpinnerGen)
process.schedule = cms.Schedule(process.p, process.p1, process.outpath)
| true | true |
f73b332d755a2f2fcc48d09b720995c5b19dfcb0 | 418 | py | Python | Python/Django/Django/LoginAndRegistration/LoginAndRegistration/wsgi.py | JosephAMumford/CodingDojo | 505be74d18d7a8f41c4b3576ca050b97f840f0a3 | [
"MIT"
] | 2 | 2018-08-18T15:14:45.000Z | 2019-10-16T16:14:13.000Z | Python/Django/Django/LoginAndRegistration/LoginAndRegistration/wsgi.py | JosephAMumford/CodingDojo | 505be74d18d7a8f41c4b3576ca050b97f840f0a3 | [
"MIT"
] | null | null | null | Python/Django/Django/LoginAndRegistration/LoginAndRegistration/wsgi.py | JosephAMumford/CodingDojo | 505be74d18d7a8f41c4b3576ca050b97f840f0a3 | [
"MIT"
] | 6 | 2018-05-05T18:13:05.000Z | 2021-05-20T11:32:48.000Z | """
WSGI config for LoginAndRegistration project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before the app is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "LoginAndRegistration.settings")
# Module-level WSGI callable discovered by WSGI servers (e.g. gunicorn, mod_wsgi).
application = get_wsgi_application()
| 24.588235 | 80 | 0.799043 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "LoginAndRegistration.settings")
application = get_wsgi_application()
| true | true |
f73b34484115fb55197b499b75b4fa1ae445e834 | 2,116 | py | Python | examples/gloo-arrows.py | antoineMoPa/glumpy | 901df7eb37cd728c2fe7e54920392b700b46c0ac | [
"BSD-3-Clause"
] | 1,074 | 2015-01-02T07:52:35.000Z | 2022-03-28T08:58:55.000Z | examples/gloo-arrows.py | antoineMoPa/glumpy | 901df7eb37cd728c2fe7e54920392b700b46c0ac | [
"BSD-3-Clause"
] | 273 | 2015-01-02T19:49:30.000Z | 2021-12-15T11:02:53.000Z | examples/gloo-arrows.py | antoineMoPa/glumpy | 901df7eb37cd728c2fe7e54920392b700b46c0ac | [
"BSD-3-Clause"
] | 206 | 2015-01-01T10:51:53.000Z | 2022-03-07T13:52:13.000Z | # -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import app, gl, gloo
from glumpy.transforms import Position, OrthographicProjection, PanZoom
# Create window
window = app.Window(width=2*512, height=512, color=(1,1,1,1))
# What to draw when necessary
@window.event
def on_draw(dt):
    window.clear()
    program.draw(gl.GL_POINTS)
    # Slowly spin the big arrow in the right half of the window.
    program['orientation'][-1] += np.pi/1024.0
# Setup some markers
# 500 spiral arrows + 1 large rotating arrow; one record per arrow, with
# the per-vertex attributes consumed by the arrow shader.
n = 500+1
data = np.zeros(n, dtype=[('position', np.float32, 2),
                          ('fg_color', np.float32, 4),
                          ('bg_color', np.float32, 4),
                          ('size', np.float32, 1),
                          ('head', np.float32, 1),
                          ('orientation', np.float32, 1),
                          ('linewidth', np.float32, 1)])
data = data.view(gloo.VertexBuffer)
data['linewidth'] = 1
data['fg_color'] = 0, 0, 0, 1
data['bg_color'] = 0, 0, 0, 1
data['orientation'] = 0
data['head'] = 0.25
# Lay the first 500 arrows out along an inward spiral, shrinking as they go.
radius, theta, dtheta = 245.0, 0.0, 6.5 / 180.0 * np.pi
for i in range(500):
    theta += dtheta
    x = 256 + radius * np.cos(theta)
    y = 256 + radius * np.sin(theta)
    r = 10.1 - i * 0.01
    radius -= 0.4
    data['orientation'][i] = theta + np.pi
    data['position'][i] = x, y
    data['size'][i] = 2 * r
    data['linewidth'][i] = 1.5 - 0.5*i/500.
# Last record: one large arrow centered in the right half of the window.
data['position'][-1] = 512+256, 256
data['size'][-1] = 512/np.sqrt(2)
data['linewidth'][-1] = 16.0
data['fg_color'][-1] = 0, 0, 0, 1
data['bg_color'][-1] = .95, .95, .95, 1
data['orientation'][-1] = 0
program = gloo.Program("arrows/arrow.vert", "arrows/arrow.frag")
program.bind(data)
program['antialias'] = 1.00
program['arrow'] = "stealth"
program['paint'] = "filled"
# Map pixel coordinates to clip space.
transform = OrthographicProjection(Position("position"))
program['transform'] = transform
window.attach(transform)
app.run()
| 33.587302 | 79 | 0.531191 |
import numpy as np
from glumpy import app, gl, gloo
from glumpy.transforms import Position, OrthographicProjection, PanZoom
window = app.Window(width=2*512, height=512, color=(1,1,1,1))
@window.event
def on_draw(dt):
window.clear()
program.draw(gl.GL_POINTS)
program['orientation'][-1] += np.pi/1024.0
n = 500+1
data = np.zeros(n, dtype=[('position', np.float32, 2),
('fg_color', np.float32, 4),
('bg_color', np.float32, 4),
('size', np.float32, 1),
('head', np.float32, 1),
('orientation', np.float32, 1),
('linewidth', np.float32, 1)])
data = data.view(gloo.VertexBuffer)
data['linewidth'] = 1
data['fg_color'] = 0, 0, 0, 1
data['bg_color'] = 0, 0, 0, 1
data['orientation'] = 0
data['head'] = 0.25
radius, theta, dtheta = 245.0, 0.0, 6.5 / 180.0 * np.pi
for i in range(500):
theta += dtheta
x = 256 + radius * np.cos(theta)
y = 256 + radius * np.sin(theta)
r = 10.1 - i * 0.01
radius -= 0.4
data['orientation'][i] = theta + np.pi
data['position'][i] = x, y
data['size'][i] = 2 * r
data['linewidth'][i] = 1.5 - 0.5*i/500.
data['position'][-1] = 512+256, 256
data['size'][-1] = 512/np.sqrt(2)
data['linewidth'][-1] = 16.0
data['fg_color'][-1] = 0, 0, 0, 1
data['bg_color'][-1] = .95, .95, .95, 1
data['orientation'][-1] = 0
program = gloo.Program("arrows/arrow.vert", "arrows/arrow.frag")
program.bind(data)
program['antialias'] = 1.00
program['arrow'] = "stealth"
program['paint'] = "filled"
transform = OrthographicProjection(Position("position"))
program['transform'] = transform
window.attach(transform)
app.run()
| true | true |
f73b3458510b6cdb2408061f0f271242b3ba5102 | 6,605 | py | Python | nibabies/cli/run.py | effigies/nibabies | 3572d71d7a4fb1796973e54f2af77d00758c99e1 | [
"Apache-2.0"
] | 1 | 2022-02-09T18:42:56.000Z | 2022-02-09T18:42:56.000Z | nibabies/cli/run.py | effigies/nibabies | 3572d71d7a4fb1796973e54f2af77d00758c99e1 | [
"Apache-2.0"
] | null | null | null | nibabies/cli/run.py | effigies/nibabies | 3572d71d7a4fb1796973e54f2af77d00758c99e1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""NiBabies runner."""
from .. import config
def main():
    """Entry point: build the NiBabies workflow in a subprocess, run it,
    then generate reports and derivative metadata.

    Exits via ``sys.exit`` for reports-only/boilerplate-only modes and on
    workflow-construction failure; re-raises workflow runtime errors after
    logging them.
    """
    from os import EX_SOFTWARE
    from pathlib import Path
    import sys
    import gc
    from multiprocessing import Process, Manager
    from .parser import parse_args
    from ..utils.bids import write_derivative_description, write_bidsignore

    parse_args()

    # CRITICAL Save the config to a file. This is necessary because the execution graph
    # is built as a separate process to keep the memory footprint low. The most
    # straightforward way to communicate with the child process is via the filesystem.
    config_file = config.execution.work_dir / config.execution.run_uuid / "config.toml"
    config_file.parent.mkdir(exist_ok=True, parents=True)
    config.to_filename(config_file)

    # CRITICAL Call build_workflow(config_file, retval) in a subprocess.
    # Because Python on Linux does not ever free virtual memory (VM), running the
    # workflow construction jailed within a process preempts excessive VM buildup.
    with Manager() as mgr:
        from .workflow import build_workflow

        retval = mgr.dict()
        p = Process(target=build_workflow, args=(str(config_file), retval))
        p.start()
        p.join()

        retcode = p.exitcode or retval.get("return_code", 0)
        nibabies_wf = retval.get("workflow", None)

    # CRITICAL Load the config from the file. This is necessary because the ``build_workflow``
    # function executed constrained in a process may change the config (and thus the global
    # state of NiBabies).
    config.load(config_file)

    if config.execution.reports_only:
        sys.exit(int(retcode > 0))

    if nibabies_wf and config.execution.write_graph:
        nibabies_wf.write_graph(graph2use="colored", format="svg", simple_form=True)

    # A missing workflow is a software failure even if the subprocess exited 0.
    retcode = retcode or (nibabies_wf is None) * EX_SOFTWARE
    if retcode != 0:
        sys.exit(retcode)

    # Generate boilerplate (in a subprocess for the same memory reasons as above)
    with Manager() as mgr:
        from .workflow import build_boilerplate

        p = Process(target=build_boilerplate, args=(str(config_file), nibabies_wf))
        p.start()
        p.join()

    if config.execution.boilerplate_only:
        sys.exit(int(retcode > 0))

    # Clean up master process before running workflow, which may create forks
    gc.collect()

    config.loggers.workflow.log(
        15,
        "\n".join(
            ["nibabies config:"] + ["\t\t%s" % s for s in config.dumps().splitlines()]
        ),
    )
    config.loggers.workflow.log(25, "nibabies started!")
    try:
        nibabies_wf.run(**config.nipype.get_plugin())
    except Exception as e:
        config.loggers.workflow.critical("nibabies failed: %s", e)
        raise
    else:
        config.loggers.workflow.log(25, "nibabies finished successfully!")

        # Bother users with the boilerplate only iff the workflow went okay.
        boiler_file = config.execution.nibabies_dir / "logs" / "CITATION.md"
        if boiler_file.exists():
            if config.environment.exec_env in (
                "singularity",
                "docker",
                "nibabies-docker",
            ):
                # Inside a container, report the path as seen from the host.
                boiler_file = Path("<OUTPUT_PATH>") / boiler_file.relative_to(
                    config.execution.output_dir
                )
            config.loggers.workflow.log(
                25,
                "Works derived from this nibabies execution should include the "
                f"boilerplate text found in {boiler_file}.",
            )

        if config.workflow.run_reconall:
            from templateflow import api
            from niworkflows.utils.misc import _copy_any

            # Ship the FreeSurfer segmentation label tables with the derivatives.
            dseg_tsv = str(api.get("fsaverage", suffix="dseg", extension=[".tsv"]))
            _copy_any(
                dseg_tsv, str(config.execution.nibabies_dir / "desc-aseg_dseg.tsv")
            )
            _copy_any(
                dseg_tsv, str(config.execution.nibabies_dir / "desc-aparcaseg_dseg.tsv")
            )
    finally:
        from ..reports.core import generate_reports
        from pkg_resources import resource_filename as pkgrf

        # Generate reports phase -- always runs, even when the workflow failed.
        generate_reports(
            config.execution.participant_label,
            config.execution.nibabies_dir,
            config.execution.run_uuid,
            config=pkgrf("nibabies", "data/reports-spec.yml"),
            packagename="nibabies",
        )
        write_derivative_description(config.execution.bids_dir, config.execution.nibabies_dir)
        write_bidsignore(config.execution.nibabies_dir)
if __name__ == "__main__":
    # Direct execution skips the packaging-provided console scripts that set
    # up the environment; refuse to run outside an installed entry point.
    raise RuntimeError(
        "Please `pip install` this and run via the commandline interfaces, `nibabies <command>`"
    )
| 37.528409 | 96 | 0.626646 |
from .. import config
def main():
from os import EX_SOFTWARE
from pathlib import Path
import sys
import gc
from multiprocessing import Process, Manager
from .parser import parse_args
from ..utils.bids import write_derivative_description, write_bidsignore
parse_args()
config_file = config.execution.work_dir / config.execution.run_uuid / "config.toml"
config_file.parent.mkdir(exist_ok=True, parents=True)
config.to_filename(config_file)
with Manager() as mgr:
from .workflow import build_workflow
retval = mgr.dict()
p = Process(target=build_workflow, args=(str(config_file), retval))
p.start()
p.join()
retcode = p.exitcode or retval.get("return_code", 0)
nibabies_wf = retval.get("workflow", None)
config.load(config_file)
if config.execution.reports_only:
sys.exit(int(retcode > 0))
if nibabies_wf and config.execution.write_graph:
nibabies_wf.write_graph(graph2use="colored", format="svg", simple_form=True)
retcode = retcode or (nibabies_wf is None) * EX_SOFTWARE
if retcode != 0:
sys.exit(retcode)
with Manager() as mgr:
from .workflow import build_boilerplate
p = Process(target=build_boilerplate, args=(str(config_file), nibabies_wf))
p.start()
p.join()
if config.execution.boilerplate_only:
sys.exit(int(retcode > 0))
gc.collect()
config.loggers.workflow.log(
15,
"\n".join(
["nibabies config:"] + ["\t\t%s" % s for s in config.dumps().splitlines()]
),
)
config.loggers.workflow.log(25, "nibabies started!")
ipype.get_plugin())
except Exception as e:
config.loggers.workflow.critical("nibabies failed: %s", e)
raise
else:
config.loggers.workflow.log(25, "nibabies finished successfully!")
boiler_file = config.execution.nibabies_dir / "logs" / "CITATION.md"
if boiler_file.exists():
if config.environment.exec_env in (
"singularity",
"docker",
"nibabies-docker",
):
boiler_file = Path("<OUTPUT_PATH>") / boiler_file.relative_to(
config.execution.output_dir
)
config.loggers.workflow.log(
25,
"Works derived from this nibabies execution should include the "
f"boilerplate text found in {boiler_file}.",
)
if config.workflow.run_reconall:
from templateflow import api
from niworkflows.utils.misc import _copy_any
dseg_tsv = str(api.get("fsaverage", suffix="dseg", extension=[".tsv"]))
_copy_any(
dseg_tsv, str(config.execution.nibabies_dir / "desc-aseg_dseg.tsv")
)
_copy_any(
dseg_tsv, str(config.execution.nibabies_dir / "desc-aparcaseg_dseg.tsv")
)
finally:
from ..reports.core import generate_reports
from pkg_resources import resource_filename as pkgrf
generate_reports(
config.execution.participant_label,
config.execution.nibabies_dir,
config.execution.run_uuid,
config=pkgrf("nibabies", "data/reports-spec.yml"),
packagename="nibabies",
)
write_derivative_description(config.execution.bids_dir, config.execution.nibabies_dir)
write_bidsignore(config.execution.nibabies_dir)
if __name__ == "__main__":
raise RuntimeError(
"Please `pip install` this and run via the commandline interfaces, `nibabies <command>`"
)
| true | true |
f73b34adb34923d2309fafc833d7321b5a0b02dc | 8,441 | py | Python | photutils/aperture/mask.py | fdeugenio/photutils | 33c8b15cbbda85dc11c86a73217422dcb61398b7 | [
"BSD-3-Clause"
] | null | null | null | photutils/aperture/mask.py | fdeugenio/photutils | 33c8b15cbbda85dc11c86a73217422dcb61398b7 | [
"BSD-3-Clause"
] | null | null | null | photutils/aperture/mask.py | fdeugenio/photutils | 33c8b15cbbda85dc11c86a73217422dcb61398b7 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import astropy.units as u
__all__ = ['ApertureMask']
class ApertureMask:
    """
    Class for an aperture mask.

    Parameters
    ----------
    data : array_like
        A 2D array representing the fractional overlap of an aperture on
        the pixel grid. This should be the full-sized (i.e. not
        truncated) array that is the direct output of one of the
        low-level `photutils.geometry` functions.

    bbox : `photutils.BoundingBox`
        The bounding box object defining the aperture minimal bounding
        box.
    """

    def __init__(self, data, bbox):
        self.data = np.asanyarray(data)
        if self.data.shape != bbox.shape:
            raise ValueError('mask data and bounding box must have the same '
                             'shape')
        self.bbox = bbox
        # Boolean mask of pixels with zero aperture overlap (used by
        # ``multiply`` to zero-out values inside the bbox but outside the
        # aperture).
        self._mask = (self.data == 0)

    def __array__(self):
        """
        Array representation of the mask data array (e.g., for matplotlib).
        """
        return self.data

    @property
    def shape(self):
        """
        The shape of the mask data array.
        """
        return self.data.shape

    def _overlap_slices(self, shape):
        """
        Calculate the slices for the overlapping part of the bounding
        box and an array of the given shape.

        Parameters
        ----------
        shape : tuple of int
            The ``(ny, nx)`` shape of array where the slices are to be
            applied.

        Returns
        -------
        slices_large : tuple of slices
            A tuple of slice objects for each axis of the large array,
            such that ``large_array[slices_large]`` extracts the region
            of the large array that overlaps with the small array.

        slices_small : tuple of slices
            A tuple of slice objects for each axis of the small array,
            such that ``small_array[slices_small]`` extracts the region
            of the small array that is inside the large array.
        """
        if len(shape) != 2:
            raise ValueError('input shape must have 2 elements.')

        xmin = self.bbox.ixmin
        xmax = self.bbox.ixmax
        ymin = self.bbox.iymin
        ymax = self.bbox.iymax

        if xmin >= shape[1] or ymin >= shape[0] or xmax <= 0 or ymax <= 0:
            # no overlap of the aperture with the data
            return None, None

        slices_large = (slice(max(ymin, 0), min(ymax, shape[0])),
                        slice(max(xmin, 0), min(xmax, shape[1])))

        slices_small = (slice(max(-ymin, 0),
                              min(ymax - ymin, shape[0] - ymin)),
                        slice(max(-xmin, 0),
                              min(xmax - xmin, shape[1] - xmin)))

        return slices_large, slices_small

    def _to_image_partial_overlap(self, image):
        """
        Return an image of the mask in a 2D array, where the mask
        is not fully within the image (i.e. partial or no overlap).
        """
        # find the overlap of the mask on the output image shape
        slices_large, slices_small = self._overlap_slices(image.shape)

        if slices_small is None:
            return None  # no overlap

        # insert the mask into the output image
        image[slices_large] = self.data[slices_small]

        return image

    def to_image(self, shape):
        """
        Return an image of the mask in a 2D array of the given shape,
        taking any edge effects into account.

        Parameters
        ----------
        shape : tuple of int
            The ``(ny, nx)`` shape of the output array.

        Returns
        -------
        result : `~numpy.ndarray`
            A 2D array of the mask.
        """
        if len(shape) != 2:
            raise ValueError('input shape must have 2 elements.')

        image = np.zeros(shape)

        if self.bbox.ixmin < 0 or self.bbox.iymin < 0:
            return self._to_image_partial_overlap(image)

        try:
            image[self.bbox.slices] = self.data
        except ValueError:  # partial or no overlap
            image = self._to_image_partial_overlap(image)

        return image

    def cutout(self, data, fill_value=0., copy=False):
        """
        Create a cutout from the input data over the mask bounding box,
        taking any edge effects into account.

        Parameters
        ----------
        data : array_like
            A 2D array on which to apply the aperture mask.

        fill_value : float, optional
            The value used to fill pixels where the aperture mask does
            not overlap with the input ``data``.  The default is 0.

        copy : bool, optional
            If `True` then the returned cutout array will always hold a
            copy of the input ``data``.  If `False` and the mask is
            fully within the input ``data``, then the returned cutout
            array will be a view into the input ``data``.  In cases
            where the mask partially overlaps or has no overlap with the
            input ``data``, the returned cutout array will always hold a
            copy of the input ``data`` (i.e. this keyword has no
            effect).

        Returns
        -------
        result : `~numpy.ndarray`
            A 2D array cut out from the input ``data`` representing the
            same cutout region as the aperture mask.  If there is a
            partial overlap of the aperture mask with the input data,
            pixels outside of the data will be assigned to
            ``fill_value``.  `None` is returned if there is no overlap
            of the aperture with the input ``data``.
        """
        data = np.asanyarray(data)
        if data.ndim not in (2, 3):
            raise ValueError('data must be a 2D or 3D array.')

        partial_overlap = self.bbox.ixmin < 0 or self.bbox.iymin < 0

        if not partial_overlap:
            # try this for speed -- the result may still be a partial
            # overlap, in which case the next block will be triggered
            if copy:
                cutout = data[(Ellipsis,) + self.bbox.slices].copy()  # preserves Quantity
            else:
                cutout = data[(Ellipsis,) + self.bbox.slices]

        if partial_overlap or (cutout.shape[-2:] != self.shape):
            slices_large, slices_small = self._overlap_slices(data.shape[-2:])

            if slices_small is None:
                return None  # no overlap

            # cutout is a copy
            output_shape = self.shape if data.ndim == 2 else (data.shape[0],) + self.shape
            cutout = np.zeros(output_shape, dtype=data.dtype)
            cutout[:] = fill_value
            cutout[(Ellipsis,) + slices_small] = data[(Ellipsis,) + slices_large]

            if isinstance(data, u.Quantity):
                cutout = u.Quantity(cutout, unit=data.unit)

        return cutout

    def multiply(self, data, fill_value=0.):
        """
        Multiply the aperture mask with the input data, taking any edge
        effects into account.

        The result is a mask-weighted cutout from the data.

        Parameters
        ----------
        data : array_like or `~astropy.units.Quantity`
            The 2D array to multiply with the aperture mask.

        fill_value : float, optional
            The value is used to fill pixels where the aperture mask
            does not overlap with the input ``data``.  The default is 0.

        Returns
        -------
        result : `~numpy.ndarray` or `None`
            A 2D mask-weighted cutout from the input ``data``.  If there
            is a partial overlap of the aperture mask with the input
            data, pixels outside of the data will be assigned to
            ``fill_value`` before being multiplied with the mask.  `None`
            is returned if there is no overlap of the aperture with the
            input ``data``.
        """
        # make a copy to prevent changing the input data
        cutout = self.cutout(data, fill_value=fill_value, copy=True)
        if cutout is None:
            return None
        else:
            # needed to zero out non-finite data values outside of the
            # aperture mask but within the bounding box
            cutout[self._mask] = 0.
            return cutout * self.data
| 33.764 | 88 | 0.573392 |
import numpy as np
import astropy.units as u
__all__ = ['ApertureMask']
class ApertureMask:
def __init__(self, data, bbox):
self.data = np.asanyarray(data)
if self.data.shape != bbox.shape:
raise ValueError('mask data and bounding box must have the same '
'shape')
self.bbox = bbox
self._mask = (self.data == 0)
def __array__(self):
return self.data
@property
def shape(self):
return self.data.shape
def _overlap_slices(self, shape):
if len(shape) != 2:
raise ValueError('input shape must have 2 elements.')
xmin = self.bbox.ixmin
xmax = self.bbox.ixmax
ymin = self.bbox.iymin
ymax = self.bbox.iymax
if xmin >= shape[1] or ymin >= shape[0] or xmax <= 0 or ymax <= 0:
return None, None
slices_large = (slice(max(ymin, 0), min(ymax, shape[0])),
slice(max(xmin, 0), min(xmax, shape[1])))
slices_small = (slice(max(-ymin, 0),
min(ymax - ymin, shape[0] - ymin)),
slice(max(-xmin, 0),
min(xmax - xmin, shape[1] - xmin)))
return slices_large, slices_small
def _to_image_partial_overlap(self, image):
slices_large, slices_small = self._overlap_slices(image.shape)
if slices_small is None:
return None
image[slices_large] = self.data[slices_small]
return image
def to_image(self, shape):
if len(shape) != 2:
raise ValueError('input shape must have 2 elements.')
image = np.zeros(shape)
if self.bbox.ixmin < 0 or self.bbox.iymin < 0:
return self._to_image_partial_overlap(image)
try:
image[self.bbox.slices] = self.data
except ValueError:
image = self._to_image_partial_overlap(image)
return image
def cutout(self, data, fill_value=0., copy=False):
data = np.asanyarray(data)
if (data.ndim != 2) and (data.ndim != 3):
raise ValueError('data must be a 2D or 3D array.')
partial_overlap = False
if self.bbox.ixmin < 0 or self.bbox.iymin < 0:
partial_overlap = True
if not partial_overlap:
if copy:
cutout = data[(Ellipsis,)+self.bbox.slices].copy()
else:
cutout = data[(Ellipsis,)+self.bbox.slices]
if partial_overlap or (cutout.shape[-2:] != self.shape):
slices_large, slices_small = self._overlap_slices(data.shape[-2:])
if slices_small is None:
return None
output_shape = self.shape if data.ndim==2 else (data.shape[0],)+self.shape
cutout = np.zeros(output_shape, dtype=data.dtype)
cutout[:] = fill_value
cutout[(Ellipsis,)+slices_small] = data[(Ellipsis,)+slices_large]
if isinstance(data, u.Quantity):
cutout = u.Quantity(cutout, unit=data.unit)
return cutout
def multiply(self, data, fill_value=0.):
cutout = self.cutout(data, fill_value=fill_value, copy=True)
if cutout is None:
return None
else:
cutout[self._mask] = 0.
return cutout * self.data
| true | true |
f73b34da37fb2b8765527e7e8be2ce104a3916e3 | 8,702 | py | Python | main.py | Jarred-Sumner/im2smpl | cb3a09ee99815939e9f7d55479920a32703be9ce | [
"MIT",
"Unlicense"
] | 64 | 2019-11-30T09:15:21.000Z | 2022-03-15T21:19:24.000Z | main.py | Jarred-Sumner/im2smpl | cb3a09ee99815939e9f7d55479920a32703be9ce | [
"MIT",
"Unlicense"
] | 3 | 2020-01-15T08:47:38.000Z | 2021-11-08T11:35:54.000Z | main.py | Jarred-Sumner/im2smpl | cb3a09ee99815939e9f7d55479920a32703be9ce | [
"MIT",
"Unlicense"
] | 18 | 2019-12-07T23:31:01.000Z | 2022-03-04T05:44:41.000Z | # Software License Agreement (BSD License)
#
# Copyright (c) 2019, Zerong Zheng (zzr18@mails.tsinghua.edu.cn)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function, absolute_import, division
import os
import numpy as np
import cv2 as cv
import argparse
import random
import string
import shutil
from subprocess import call
import pynvml
import time
pynvml.nvmlInit()
def parse_args():
    """Parse the command-line arguments for the im2smpl pipeline."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--img_file', type=str, required=True,
                            help='path to image file')
    arg_parser.add_argument('--out_dir', type=str, required=True,
                            help='output directory')
    return arg_parser.parse_args()
def waitgpu(empty_thres_duration=7):
    """Block until GPU 0 memory usage stays below 10% of capacity for
    `empty_thres_duration` consecutive one-second checks.

    While the GPU is busy, prints a status message and backs off for a
    random 5-15 second interval before polling again.
    """
    consecutive_idle = 0
    while True:
        handle = pynvml.nvmlDeviceGetHandleByIndex(0)
        meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
        usage_percent = float(meminfo.used) / float(meminfo.total)
        if usage_percent >= 0.1:
            # GPU busy: reset the idle streak and wait a random while.
            consecutive_idle = 0
            print('GPU is busy right now....waiting....')
            print('meminfo.used/meminfo.total = %f' % usage_percent)
            time.sleep(np.random.randint(5, 15))
            continue
        if consecutive_idle >= empty_thres_duration:
            break
        consecutive_idle += 1
        time.sleep(1)
def detect_human(fname, out_dir):
    """Run the AlphaPose human-detection RCNN on `fname`.

    The bounding-box result is written by the driver script into
    `out_dir` (as ``<img_name>.bbox.txt``, consumed by the next step).
    """
    waitgpu()
    print('\n\nStep 1. Human Detection RCNN')
    # Drop the driver into the RCNN tools directory and invoke it from a
    # throw-away shell script so the working directory is correct.
    shutil.copy('./detect_human.py', './AlphaPose/human-detection/tools/')
    script_path = os.path.join(
        './', ''.join(random.sample(string.ascii_letters + string.digits, 8)) + '.sh')
    with open(script_path, 'w') as script:
        script.write('#!/usr/local/bin/bash\n')
        script.write('cd ./AlphaPose/human-detection/tools\n')
        script.write('python2 detect_human.py --img_file %s --out_dir %s\n'
                     % (fname, out_dir))
        script.write('cd ../../../\n')
    call(['sh', script_path])
    os.remove(script_path)
def crop_or_pad_img(fname, out_dir):
    """Crop or pad the image so the subject is centered and fills most of it.

    Reads the first bounding box from ``<out_dir>/<img_name>.bbox.txt``
    (written by ``detect_human``), keeps a copy of the original image as
    ``<name>_orig.png`` in `out_dir`, and overwrites ``<out_dir>/<img_name>``
    with a square crop centered on the box, 1.2x the larger box edge on a
    side (zero-padded where the window extends past the image border).
    """
    print('\n\nStep 2. Image cropping or padding')
    # Bug fix: derive the name from the `fname` argument rather than the
    # module-level `img_fname`, which only exists when run as a script.
    img_dir, img_name = os.path.split(fname)
    with open(os.path.join(out_dir, img_name + '.bbox.txt'), 'r') as fp:
        lines = fp.readlines()
    # Drop a trailing blank line. readlines() keeps the '\n', so strip
    # before comparing (the original `lines[-1] == ''` never matched).
    if lines and lines[-1].strip() == '':
        lines = lines[:-1]
    if len(lines) > 1:
        print('Warning: More than 1 bounding boxes are detected. '
              'Only the first one is used.')
    entries = lines[0].split(' ')
    xmin, ymin = int(entries[0]), int(entries[1])
    xmax, ymax = int(entries[2]), int(entries[3])
    x_center = int((xmin + xmax) / 2)
    y_center = int((ymin + ymax) / 2)
    edge_len = int(max(xmax - xmin, ymax - ymin) * 1.2)
    edge_len_half = int(edge_len / 2)

    img = cv.imread(fname)
    cv.imwrite(os.path.join(out_dir, img_name[:-4] + '_orig.png'), img)
    if len(img.shape) == 2:
        img = cv.cvtColor(img, cv.COLOR_GRAY2RGB)
    h, w = img.shape[0], img.shape[1]
    # Embed the image in a 3x-sized zero canvas so any crop window
    # centered on the subject stays within bounds.
    img_pad = np.zeros((3 * h, 3 * w, 3), dtype=np.uint8)
    img_pad[h:(h * 2), w:(w * 2), :] = img
    crop_tl = (h + y_center - edge_len_half, w + x_center - edge_len_half)
    crop_dr = (h + y_center + edge_len_half, w + x_center + edge_len_half)
    img_crop = img_pad[crop_tl[0]:crop_dr[0], crop_tl[1]:crop_dr[1], :]
    # The original wrote the crop twice; once is enough.
    cv.imwrite(os.path.join(out_dir, img_name), img_crop)
def _run_temp_script(commands):
    """Write shell `commands` to a randomly-named throw-away script, run it
    with sh, then delete the script."""
    shname = os.path.join(
        './', ''.join(random.sample(string.ascii_letters + string.digits, 8)) + '.sh')
    with open(shname, 'w') as fp:
        fp.write('#!/usr/local/bin/bash\n')
        for command in commands:
            fp.write(command + '\n')
    call(['sh', shname])
    os.remove(shname)


def infer_smpl_and_pose(fname, out_dir):
    """Estimate the SMPL body model (HMR), 2D joints (AlphaPose) and the
    body segmentation (LIP_JPPNet) for the cropped image.

    Writes ``<img_name>.joints.txt`` and ``<img_name>.joint_scores.txt``
    into `out_dir`, plus whatever the ``infer_smpl.py`` and
    ``segment_by_parsing.py`` driver scripts produce there.
    """
    waitgpu()
    print('\n\nStep 3a Body model estimation using HMR. ')
    shutil.copy('./infer_smpl.py', './hmr/')
    _run_temp_script([
        'cd ./hmr/',
        'python2 infer_smpl.py --img_path %s --out_dir %s' % (fname, out_dir),
        'cd ../'])

    print('\n\nStep 3b Pose estimation using AlphaPose')
    # Bug fix: use the `fname` argument, not the module-level `img_fname`
    # that only exists when this file is run as a script.
    img_dir, img_name = os.path.split(fname)
    tmp_folder = ''.join(random.sample(string.ascii_letters + string.digits, 8))
    os.mkdir(os.path.join('./AlphaPose/examples', tmp_folder))
    os.mkdir(os.path.join('./AlphaPose/examples', tmp_folder, 'demo'))
    os.mkdir(os.path.join('./AlphaPose/examples', tmp_folder, 'results'))
    call(['cp', os.path.join(out_dir, img_name),
          os.path.join('./AlphaPose/examples', tmp_folder, 'demo/1.jpg')])
    call(['./AlphaPose/run.sh', '--indir', os.path.join('./examples', tmp_folder, 'demo'),
          '--outdir', os.path.join('./examples', tmp_folder, 'results'), '--vis'])
    call(['mv', os.path.join('./AlphaPose/examples', tmp_folder, 'results/POSE/pred.txt'),
          os.path.join(out_dir, img_name + '.joints.txt')])
    call(['mv', os.path.join('./AlphaPose/examples', tmp_folder, 'results/POSE/scores.txt'),
          os.path.join(out_dir, img_name + '.joint_scores.txt')])
    call(['rm', '-r', os.path.join('./AlphaPose/examples', tmp_folder)])

    print('\n\nStep 3c Image segmentation')
    shutil.copy('./segment_by_parsing.py', './LIP_JPPNet/')
    _run_temp_script([
        'cd ./LIP_JPPNet/',
        'python2 segment_by_parsing.py --img_file %s --out_dir %s' % (fname, out_dir),
        'cd ../'])
def optimize_smpl(fname, out_dir):
    """Run the SMPLify fitting step (``fit_3d_accurate.py``) on the image,
    refining the SMPL model using the results of the previous steps."""
    print('\n\nStep 4 SMPL model optimization')
    shutil.copy('./fit_3d_accurate.py', './smplify_public/code/')
    # Build a throw-away shell script, execute it, then clean it up.
    script_path = os.path.join(
        './', ''.join(random.sample(string.ascii_letters + string.digits, 8)) + '.sh')
    script_body = '\n'.join([
        '#!/usr/local/bin/bash',
        'cd ./smplify_public/code',
        'python2 fit_3d_accurate.py --img_file %s --out_dir %s' % (fname, out_dir),
        'cd ../../',
    ]) + '\n'
    with open(script_path, 'w') as script_file:
        script_file.write(script_body)
    call(['sh', script_path])
    os.remove(script_path)
def main(img_fname, out_dir):
    """Drive the full image-to-SMPL pipeline on a single image.

    Runs, in order: human detection, image cropping/padding, SMPL + pose
    + segmentation inference, and SMPL optimization.
    """
    print('image file: ' + img_fname)
    print('output directory: ' + out_dir)
    if not os.path.isfile(img_fname):
        raise IOError('Image file does not exist!')
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    for stage in (detect_human, crop_or_pad_img,
                  infer_smpl_and_pose, optimize_smpl):
        stage(img_fname, out_dir)
if __name__ == '__main__':
    # Command-line entry point: resolve both paths to absolute form first,
    # since the pipeline steps cd into various tool directories.
    args = parse_args()
    img_fname = args.img_file
    out_dir = args.out_dir
    img_fname = os.path.abspath(img_fname)
    out_dir = os.path.abspath(out_dir)
    main(img_fname, out_dir)
| 42.44878 | 97 | 0.666858 |
from __future__ import print_function, absolute_import, division
import os
import numpy as np
import cv2 as cv
import argparse
import random
import string
import shutil
from subprocess import call
import pynvml
import time
pynvml.nvmlInit()
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--img_file', type=str, required=True, help='path to image file')
parser.add_argument('--out_dir', type=str, required=True, help='output directory')
return parser.parse_args()
def waitgpu(empty_thres_duration=7):
empty_flag = 0
while True:
handle = pynvml.nvmlDeviceGetHandleByIndex(0)
meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
usage_percent = float(meminfo.used)/float(meminfo.total)
if usage_percent < 0.1:
if empty_flag >= empty_thres_duration:
break
empty_flag += 1
time.sleep(1)
continue
empty_flag = 0
print('GPU is busy right now....waiting....')
print('meminfo.used/meminfo.total = %f' % usage_percent)
time.sleep(np.random.randint(5, 15))
def detect_human(fname, out_dir):
waitgpu()
print('\n\nStep 1. Human Detection RCNN')
shutil.copy('./detect_human.py', './AlphaPose/human-detection/tools/')
temp_shname = ''.join(random.sample(string.ascii_letters + string.digits, 8)) + '.sh'
temp_shname = os.path.join('./', temp_shname)
with open(temp_shname, 'w') as fp:
fp.write('#!/usr/local/bin/bash\n')
fp.write('cd ./AlphaPose/human-detection/tools\n')
fp.write('python2 detect_human.py --img_file %s --out_dir %s\n'
% (fname, out_dir))
fp.write('cd ../../../\n')
call(['sh', temp_shname])
os.remove(temp_shname)
def crop_or_pad_img(fname, out_dir):
print('\n\nStep 2. Image cropping or padding')
img_dir, img_name = os.path.split(img_fname)
with open(os.path.join(out_dir, img_name + '.bbox.txt'), 'r') as fp:
lines = fp.readlines()
if lines[-1] == '':
lines = lines[:-1]
if len(lines) > 1:
print('Warning: More than 1 bounding boxes are detected. '
'Only the first one is used.')
entries = lines[0].split(' ')
xmin, ymin = int(entries[0]), int(entries[1])
xmax, ymax = int(entries[2]), int(entries[3])
x_center = int((xmin+xmax)/2)
y_center = int((ymin+ymax)/2)
edge_len = int(max(xmax-xmin, ymax-ymin) * 1.2)
edge_len_half = int(edge_len/2)
img = cv.imread(fname)
cv.imwrite(os.path.join(out_dir, img_name[:-4]+'_orig.png'), img)
if len(img.shape) == 2:
img = cv.cvtColor(img, cv.COLOR_GRAY2RGB)
h, w = img.shape[0], img.shape[1]
img_pad = np.zeros((3*h, 3*w, 3), dtype=np.uint8)
img_pad[h:(h*2), w:(w*2), :] = img
crop_tl = (h+y_center-edge_len_half, w+x_center-edge_len_half)
crop_dr = (h+y_center+edge_len_half, w+x_center+edge_len_half)
img_crop = img_pad[crop_tl[0]:crop_dr[0], crop_tl[1]:crop_dr[1], :]
cv.imwrite(os.path.join(out_dir, img_name), img_crop)
cv.imwrite(os.path.join(out_dir, img_name), img_crop)
def infer_smpl_and_pose(fname, out_dir):
waitgpu()
print('\n\nStep 3a Body model estimation using HMR. ')
shutil.copy('./infer_smpl.py', './hmr/')
temp_shname = ''.join(random.sample(string.ascii_letters + string.digits, 8)) + '.sh'
temp_shname = os.path.join('./', temp_shname)
with open(temp_shname, 'w') as fp:
fp.write('#!/usr/local/bin/bash\n')
fp.write('cd ./hmr/\n')
fp.write('python2 infer_smpl.py --img_path %s --out_dir %s\n' % (fname, out_dir))
fp.write('cd ../\n')
call(['sh', temp_shname])
os.remove(temp_shname)
print('\n\nStep 3b Pose estimation using AlphaPose')
img_dir, img_name = os.path.split(img_fname)
tmp_folder = ''.join(random.sample(string.ascii_letters + string.digits, 8))
os.mkdir(os.path.join('./AlphaPose/examples', tmp_folder))
os.mkdir(os.path.join('./AlphaPose/examples', tmp_folder, 'demo'))
os.mkdir(os.path.join('./AlphaPose/examples', tmp_folder, 'results'))
call(['cp', os.path.join(out_dir, img_name),
os.path.join('./AlphaPose/examples', tmp_folder, 'demo/1.jpg')])
call(['./AlphaPose/run.sh', '--indir', os.path.join('./examples', tmp_folder, 'demo'),
'--outdir', os.path.join('./examples', tmp_folder, 'results'), '--vis'])
call(['mv', os.path.join('./AlphaPose/examples', tmp_folder, 'results/POSE/pred.txt'),
os.path.join(out_dir, img_name+'.joints.txt')])
call(['mv', os.path.join('./AlphaPose/examples', tmp_folder, 'results/POSE/scores.txt'),
os.path.join(out_dir, img_name+'.joint_scores.txt')])
call(['rm', '-r', os.path.join('./AlphaPose/examples', tmp_folder)])
print('\n\nStep 3c Image segmentation')
shutil.copy('./segment_by_parsing.py', './LIP_JPPNet/')
temp_shname = ''.join(random.sample(string.ascii_letters + string.digits, 8)) + '.sh'
temp_shname = os.path.join('./', temp_shname)
with open(temp_shname, 'w') as fp:
fp.write('#!/usr/local/bin/bash\n')
fp.write('cd ./LIP_JPPNet/\n')
fp.write('python2 segment_by_parsing.py --img_file %s --out_dir %s\n' % (fname, out_dir))
fp.write('cd ../\n')
call(['sh', temp_shname])
os.remove(temp_shname)
def optimize_smpl(fname, out_dir):
print('\n\nStep 4 SMPL model optimization')
shutil.copy('./fit_3d_accurate.py', './smplify_public/code/')
temp_shname = ''.join(random.sample(string.ascii_letters + string.digits, 8)) + '.sh'
temp_shname = os.path.join('./', temp_shname)
with open(temp_shname, 'w') as fp:
fp.write('#!/usr/local/bin/bash\n')
fp.write('cd ./smplify_public/code\n')
fp.write('python2 fit_3d_accurate.py --img_file %s --out_dir %s\n' % (fname, out_dir))
fp.write('cd ../../\n')
call(['sh', temp_shname])
os.remove(temp_shname)
def main(img_fname, out_dir):
print('image file: ' + img_fname)
print('output directory: ' + out_dir)
if not os.path.isfile(img_fname):
raise IOError('Image file does not exist!')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
detect_human(img_fname, out_dir)
crop_or_pad_img(img_fname, out_dir)
infer_smpl_and_pose(img_fname, out_dir)
optimize_smpl(img_fname, out_dir)
if __name__ == '__main__':
args = parse_args()
img_fname = args.img_file
out_dir = args.out_dir
img_fname = os.path.abspath(img_fname)
out_dir = os.path.abspath(out_dir)
main(img_fname, out_dir)
| true | true |
f73b35030acd31c1f8f91e7064a382eb64e75577 | 2,676 | py | Python | tests/collaborative_tests.py | mostafa-mahmoud/HyPRec | f18318f179dd9f9af7cf01a11f13f0aefb42b3bb | [
"Apache-2.0"
] | 5 | 2017-07-17T12:55:45.000Z | 2020-07-20T09:13:10.000Z | tests/collaborative_tests.py | mostafa-mahmoud/HyPRec | f18318f179dd9f9af7cf01a11f13f0aefb42b3bb | [
"Apache-2.0"
] | 1 | 2019-05-02T20:59:53.000Z | 2019-05-02T20:59:53.000Z | tests/collaborative_tests.py | mostafa-mahmoud/HyPRec | f18318f179dd9f9af7cf01a11f13f0aefb42b3bb | [
"Apache-2.0"
] | 5 | 2017-07-30T10:50:07.000Z | 2019-08-30T20:02:47.000Z | #!/usr/bin/env python
import numpy
import unittest
from lib.abstract_recommender import AbstractRecommender
from lib.collaborative_filtering import CollaborativeFiltering
from lib.evaluator import Evaluator
from util.data_parser import DataParser
from util.model_initializer import ModelInitializer
class TestcaseBase(unittest.TestCase):
    def setUp(self):
        """Build a small synthetic ratings matrix and patch DataParser so
        the recommender under test reads the mocked data."""
        self.documents, self.users = 30, 4
        n_docs, n_users = self.documents, self.users
        self.n_factors = 5
        self.n_iterations = 20
        self.k_folds = 3
        self.hyperparameters = {'n_factors': self.n_factors, '_lambda': 0.01}
        self.options = {'k_folds': self.k_folds, 'n_iterations': self.n_iterations}
        self.initializer = ModelInitializer(self.hyperparameters.copy(), self.n_iterations)

        def mock_get_ratings_matrix(self=None):
            # User u "rates" article a exactly when (a + u) is divisible by 3.
            return [[1 if (article + user) % 3 == 0 else 0
                     for article in range(n_docs)]
                    for user in range(n_users)]

        self.ratings_matrix = numpy.array(mock_get_ratings_matrix())
        self.evaluator = Evaluator(self.ratings_matrix)
        setattr(DataParser, "get_ratings_matrix", mock_get_ratings_matrix)
class TestALS(TestcaseBase):
    def runTest(self):
        """End-to-end sanity checks for ALS collaborative filtering."""
        model = CollaborativeFiltering(self.initializer, self.evaluator,
                                       self.hyperparameters, self.options,
                                       load_matrices=False)
        self.assertEqual(model.n_factors, self.n_factors)
        self.assertEqual(model.n_items, self.documents)
        model.train()
        expected_shape = (self.users, self.documents)
        self.assertEqual(model.get_predictions().shape, expected_shape)
        self.assertTrue(isinstance(model, AbstractRecommender))
        # Ratings should be numerically within [0, 1] and correctly shaped.
        ratings = model.get_ratings()
        self.assertLessEqual(numpy.amax(ratings), 1 + 1e-6)
        self.assertGreaterEqual(numpy.amin(ratings), -1e-6)
        self.assertTrue(ratings.shape == expected_shape)
        # Same bounds apply to the rounded predictions.
        rounded = model.rounded_predictions()
        self.assertLessEqual(numpy.amax(rounded), 1 + 1e-6)
        self.assertGreaterEqual(numpy.amin(rounded), -1e-6)
        self.assertTrue(rounded.shape == expected_shape)
        recall = model.evaluator.calculate_recall(ratings, model.get_predictions())
        self.assertTrue(-1e-6 <= recall <= 1 + 1e-6)
        # A single prediction for a random (user, item) pair is a scalar.
        rand_user = int(numpy.random.random() * self.users)
        rand_item = int(numpy.random.random() * self.documents)
        rand_prediction = model.predict(rand_user, rand_item)
        self.assertTrue(isinstance(rand_prediction, numpy.float64))
| 46.947368 | 93 | 0.690957 |
import numpy
import unittest
from lib.abstract_recommender import AbstractRecommender
from lib.collaborative_filtering import CollaborativeFiltering
from lib.evaluator import Evaluator
from util.data_parser import DataParser
from util.model_initializer import ModelInitializer
class TestcaseBase(unittest.TestCase):
def setUp(self):
self.documents, self.users = 30, 4
documents_cnt, users_cnt = self.documents, self.users
self.n_factors = 5
self.n_iterations = 20
self.k_folds = 3
self.hyperparameters = {'n_factors': self.n_factors, '_lambda': 0.01}
self.options = {'k_folds': self.k_folds, 'n_iterations': self.n_iterations}
self.initializer = ModelInitializer(self.hyperparameters.copy(), self.n_iterations)
def mock_get_ratings_matrix(self=None):
return [[int(not bool((article + user) % 3)) for article in range(documents_cnt)]
for user in range(users_cnt)]
self.ratings_matrix = numpy.array(mock_get_ratings_matrix())
self.evaluator = Evaluator(self.ratings_matrix)
setattr(DataParser, "get_ratings_matrix", mock_get_ratings_matrix)
class TestALS(TestcaseBase):
def runTest(self):
cf = CollaborativeFiltering(self.initializer, self.evaluator, self.hyperparameters,
self.options, load_matrices=False)
self.assertEqual(cf.n_factors, self.n_factors)
self.assertEqual(cf.n_items, self.documents)
cf.train()
self.assertEqual(cf.get_predictions().shape, (self.users, self.documents))
self.assertTrue(isinstance(cf, AbstractRecommender))
shape = (self.users, self.documents)
ratings = cf.get_ratings()
self.assertLessEqual(numpy.amax(ratings), 1 + 1e-6)
self.assertGreaterEqual(numpy.amin(ratings), -1e-6)
self.assertTrue(ratings.shape == shape)
rounded_predictions = cf.rounded_predictions()
self.assertLessEqual(numpy.amax(rounded_predictions), 1 + 1e-6)
self.assertGreaterEqual(numpy.amin(rounded_predictions), -1e-6)
self.assertTrue(rounded_predictions.shape == shape)
recall = cf.evaluator.calculate_recall(ratings, cf.get_predictions())
self.assertTrue(-1e-6 <= recall <= 1 + 1e-6)
random_user = int(numpy.random.random() * self.users)
random_item = int(numpy.random.random() * self.documents)
random_prediction = cf.predict(random_user, random_item)
self.assertTrue(isinstance(random_prediction, numpy.float64))
| true | true |
f73b375d70bfbf2e5d11847c4bf26e1158c3e175 | 3,697 | py | Python | scripts/ingestors/ctre_bridge.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | 1 | 2019-10-07T17:01:24.000Z | 2019-10-07T17:01:24.000Z | scripts/ingestors/ctre_bridge.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | scripts/ingestors/ctre_bridge.py | trentford/iem | 7264d24f2d79a3cd69251a09758e6531233a732f | [
"MIT"
] | null | null | null | """
Need something to ingest the CTRE provided bridge data
RSAI4
RLRI4
Run from RUN_1MIN
"""
from __future__ import print_function
import datetime
import sys
from io import BytesIO
import ftplib
import subprocess
import pytz
import pyiem.util as util
from pyiem.observation import Observation
def _fetch_table(props, filename, minute):
    """Download one 3-minute CSV table from the CTRE FTP server.

    Returns the first data row parsed into a ``{column: value}`` dict, or
    ``None`` when the download fails or the payload is empty.  Failures are
    only printed once per 15 minutes (`minute` is the current wall-clock
    minute) to keep cron mail quiet.
    """
    bio = BytesIO()
    try:
        ftp = ftplib.FTP('129.186.224.167')
        ftp.login(props['ctre_ftpuser'], props['ctre_ftppass'])
        ftp.retrbinary('RETR %s' % (filename, ), bio.write)
        ftp.close()
    except Exception as exp:
        if minute % 15 == 0:
            print('Download CTRE Bridge Data Failed!!!\n%s' % (exp, ))
        return None
    data = bio.getvalue().decode('ascii').split("\n")
    if len(data) < 2:
        return None
    keys = data[0].strip().replace('"', '').split(',')
    vals = data[1].strip().replace('"', '').split(',')
    return dict(zip(keys, vals))


def _process_station(icursor, csvfp, d, station):
    """Save one station's wind observation and append a line to the CSV.

    `d` is the parsed table row; wind speeds arrive in mph and are
    converted to knots.
    """
    # Ob times are always CDT; shift +5 hours to UTC.
    ts = datetime.datetime.strptime(d['TIMESTAMP'], '%Y-%m-%d %H:%M:%S')
    gts = (ts + datetime.timedelta(hours=5)).replace(tzinfo=pytz.utc)
    lts = gts.astimezone(pytz.timezone("America/Chicago"))
    iem = Observation(station, "OT", lts)
    drct = d['WindDir']
    iem.data['drct'] = drct
    sknt = float(d['WS_mph_S_WVT']) / 1.15
    iem.data['sknt'] = sknt
    gust = float(d['WS_mph_Max']) / 1.15
    iem.data['gust'] = gust
    iem.save(icursor)
    csvfp.write("%s,%s,%s,%.1f,%.1f\n" % (station,
                                          gts.strftime("%Y/%m/%d %H:%M:%S"),
                                          drct, sknt, gust))


def main():
    """Go Main Go"""
    # Run every 4 minutes (unless invoked manually with an argument).
    now = datetime.datetime.now()
    if now.minute % 4 != 0 and len(sys.argv) < 2:
        return
    props = util.get_properties()
    pgconn = util.get_dbconn('iem')
    icursor = pgconn.cursor()
    stations = [('RSAI4', 'Saylorville_Table3Min_current.dat'),
                ('RLRI4', 'Red Rock_Table3Min_current.dat')]
    # The `with` block also fixes a leak: the original left /tmp/ctre.txt
    # open when a download failed and the function returned early.
    with open('/tmp/ctre.txt', 'w') as csvfp:
        for station, filename in stations:
            row = _fetch_table(props, filename, now.minute)
            if row is None:
                return
            _process_station(icursor, csvfp, row, station)
    cmd = ("/home/ldm/bin/pqinsert -i -p 'data c %s csv/ctre.txt "
           "bogus txt' /tmp/ctre.txt") % (now.strftime("%Y%m%d%H%M"),)
    subprocess.call(cmd, shell=True)
    icursor.close()
    pgconn.commit()
    pgconn.close()
# Entry point when executed from the RUN_1MIN cron job.
if __name__ == '__main__':
    main()
| 29.34127 | 75 | 0.548553 | from __future__ import print_function
import datetime
import sys
from io import BytesIO
import ftplib
import subprocess
import pytz
import pyiem.util as util
from pyiem.observation import Observation
def main():
now = datetime.datetime.now()
if now.minute % 4 != 0 and len(sys.argv) < 2:
return
props = util.get_properties()
pgconn = util.get_dbconn('iem')
icursor = pgconn.cursor()
csv = open('/tmp/ctre.txt', 'w')
bio = BytesIO()
try:
ftp = ftplib.FTP('129.186.224.167')
ftp.login(props['ctre_ftpuser'], props['ctre_ftppass'])
ftp.retrbinary('RETR Saylorville_Table3Min_current.dat',
bio.write)
ftp.close()
except Exception as exp:
if now.minute % 15 == 0:
print('Download CTRE Bridge Data Failed!!!\n%s' % (exp, ))
return
bio.seek(0)
data = bio.getvalue().decode('ascii').split("\n")
bio.truncate()
if len(data) < 2:
return
keys = data[0].strip().replace('"', '').split(',')
vals = data[1].strip().replace('"', '').split(',')
d = {}
for i in range(len(vals)):
d[keys[i]] = vals[i]
ts1 = datetime.datetime.strptime(d['TIMESTAMP'], '%Y-%m-%d %H:%M:%S')
gts1 = ts1 + datetime.timedelta(hours=5)
gts1 = gts1.replace(tzinfo=pytz.utc)
lts = gts1.astimezone(pytz.timezone("America/Chicago"))
iem = Observation('RSAI4', "OT", lts)
drct = d['WindDir']
iem.data['drct'] = drct
sknt = float(d['WS_mph_S_WVT']) / 1.15
iem.data['sknt'] = sknt
gust = float(d['WS_mph_Max']) / 1.15
iem.data['gust'] = gust
iem.save(icursor)
csv.write("%s,%s,%s,%.1f,%.1f\n" % ('RSAI4',
gts1.strftime("%Y/%m/%d %H:%M:%S"),
drct, sknt, gust))
try:
ftp = ftplib.FTP('129.186.224.167')
ftp.login(props['ctre_ftpuser'], props['ctre_ftppass'])
ftp.retrbinary('RETR Red Rock_Table3Min_current.dat',
bio.write)
ftp.close()
except Exception as exp:
if now.minute % 15 == 0:
print('Download CTRE Bridge Data Failed!!!\n%s' % (exp, ))
return
bio.seek(0)
data = bio.getvalue().decode('ascii').split("\n")
bio.truncate()
if len(data) < 2:
return
keys = data[0].strip().replace('"', '').split(',')
vals = data[1].strip().replace('"', '').split(',')
d = {}
for i in range(len(vals)):
d[keys[i]] = vals[i]
ts2 = datetime.datetime.strptime(d['TIMESTAMP'], '%Y-%m-%d %H:%M:%S')
gts2 = ts2 + datetime.timedelta(hours=5)
gts2 = gts2.replace(tzinfo=pytz.timezone("UTC"))
lts = gts2.astimezone(pytz.timezone("America/Chicago"))
iem = Observation('RLRI4', "OT", lts)
drct = d['WindDir']
iem.data['drct'] = drct
sknt = float(d['WS_mph_S_WVT']) / 1.15
iem.data['sknt'] = sknt
gust = float(d['WS_mph_Max']) / 1.15
iem.data['gust'] = gust
iem.save(icursor)
csv.write("%s,%s,%s,%.1f,%.1f\n" % ('RLRI4',
gts2.strftime("%Y/%m/%d %H:%M:%S"),
drct, sknt, gust))
csv.close()
cmd = ("/home/ldm/bin/pqinsert -i -p 'data c %s csv/ctre.txt "
"bogus txt' /tmp/ctre.txt") % (now.strftime("%Y%m%d%H%M"),)
subprocess.call(cmd, shell=True)
icursor.close()
pgconn.commit()
pgconn.close()
if __name__ == '__main__':
main()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.