max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
fsb/handlers/watchers.py | licwim/feature-storage-bot | 0 | 6612251 | # !/usr/bin/env python
import re
from peewee import DoesNotExist
from . import Handler
from . import MessageHandler
from .commands import BaseCommand
from ..db.models import Chat
from ..db.models import MemberRole
from ..db.models import Role
from ..error import ExitHandlerException
from ..telegram.client import TelegramApiClient
class BaseWatcher(MessageHandler):
    """Message handler that only runs when a caller-supplied predicate matches.

    Messages that start with the command prefix are always skipped so a
    watcher never shadows a command handler.
    """

    def __init__(self, client: TelegramApiClient, filter: callable):
        # NOTE: ``filter`` shadows the builtin; name kept for interface compatibility.
        super().__init__(client)
        self._filter = filter

    async def _init_filter(self, event):
        """Abort handling (ExitHandlerException) for command messages or
        events the predicate rejects."""
        await super()._init_filter(event)
        if event.message.text.startswith(BaseCommand.PREFIX) or not self._filter(event):
            raise ExitHandlerException
class MentionWatcher(BaseWatcher):
UNKNOWN_NAME_REPLACEMENT = "ты"
def __init__(self, client: TelegramApiClient):
super().__init__(client, self.filter)
@Handler.handle_decorator
async def handle(self, event):
await super().handle(event)
members = await self._client.get_dialog_members(self.entity)
members.remove(event.sender)
mentions = [matches[1] for matches in re.findall(r"(\s+|^)@([^\s@]+)", event.message.text)]
mentions_strings = []
for mention in mentions:
mention_string, added_members = self._resolve_mention(mention, members)
if mention_string:
mentions_strings.append(mention_string)
for member in added_members:
members.remove(member)
if mentions_strings:
await self._client.send_message(
self.entity,
' '.join(mentions_strings),
event.message
)
def _make_mention_string(self, members: list, rank_mention: bool = False):
result_mentions = []
for member in members:
rank = None
if rank_mention:
try:
rank = member.participant.rank
except AttributeError:
pass
if rank:
result_mentions.append(f"[{rank}](tg://user?id={str(member.id)})")
elif member.username:
result_mentions.append('@' + member.username)
else:
member_name = member.first_name if member.first_name else MentionWatcher.UNKNOWN_NAME_REPLACEMENT
result_mentions.append(f"[{member_name}](tg://user?id={str(member.id)})")
return ' '.join(result_mentions)
def _resolve_mention(self, mention: str, members: list) -> tuple:
match mention:
case 'all':
rank = False
case 'allrank':
rank = True
case _:
return self._resolve_custom_mention(mention, members)
return self._make_mention_string(members, rank), members
def _resolve_custom_mention(self, mention: str, original_members: list) -> tuple:
try:
role_members = MemberRole.select().where(
MemberRole.role == Role.get(
Role.chat == Chat.get(Chat.telegram_id == self.entity.id).get_id(),
Role.nickname == mention
)
)
members_ids = [role_member.member.user.telegram_id for role_member in role_members]
members = [member for member in original_members if member.id in members_ids]
except DoesNotExist:
return None, []
return self._make_mention_string(members, False), members
@staticmethod
def filter(event):
if re.search(r"(\s+|^)@([^\s]+)", event.message.text):
return True
else:
return False
| # !/usr/bin/env python
import re
from peewee import DoesNotExist
from . import Handler
from . import MessageHandler
from .commands import BaseCommand
from ..db.models import Chat
from ..db.models import MemberRole
from ..db.models import Role
from ..error import ExitHandlerException
from ..telegram.client import TelegramApiClient
class BaseWatcher(MessageHandler):
def __init__(self, client: TelegramApiClient, filter: callable):
super().__init__(client)
self._filter = filter
async def _init_filter(self, event):
await super()._init_filter(event)
if event.message.text.startswith(BaseCommand.PREFIX) or not self._filter(event):
raise ExitHandlerException
class MentionWatcher(BaseWatcher):
UNKNOWN_NAME_REPLACEMENT = "ты"
def __init__(self, client: TelegramApiClient):
super().__init__(client, self.filter)
@Handler.handle_decorator
async def handle(self, event):
await super().handle(event)
members = await self._client.get_dialog_members(self.entity)
members.remove(event.sender)
mentions = [matches[1] for matches in re.findall(r"(\s+|^)@([^\s@]+)", event.message.text)]
mentions_strings = []
for mention in mentions:
mention_string, added_members = self._resolve_mention(mention, members)
if mention_string:
mentions_strings.append(mention_string)
for member in added_members:
members.remove(member)
if mentions_strings:
await self._client.send_message(
self.entity,
' '.join(mentions_strings),
event.message
)
def _make_mention_string(self, members: list, rank_mention: bool = False):
result_mentions = []
for member in members:
rank = None
if rank_mention:
try:
rank = member.participant.rank
except AttributeError:
pass
if rank:
result_mentions.append(f"[{rank}](tg://user?id={str(member.id)})")
elif member.username:
result_mentions.append('@' + member.username)
else:
member_name = member.first_name if member.first_name else MentionWatcher.UNKNOWN_NAME_REPLACEMENT
result_mentions.append(f"[{member_name}](tg://user?id={str(member.id)})")
return ' '.join(result_mentions)
def _resolve_mention(self, mention: str, members: list) -> tuple:
match mention:
case 'all':
rank = False
case 'allrank':
rank = True
case _:
return self._resolve_custom_mention(mention, members)
return self._make_mention_string(members, rank), members
def _resolve_custom_mention(self, mention: str, original_members: list) -> tuple:
try:
role_members = MemberRole.select().where(
MemberRole.role == Role.get(
Role.chat == Chat.get(Chat.telegram_id == self.entity.id).get_id(),
Role.nickname == mention
)
)
members_ids = [role_member.member.user.telegram_id for role_member in role_members]
members = [member for member in original_members if member.id in members_ids]
except DoesNotExist:
return None, []
return self._make_mention_string(members, False), members
@staticmethod
def filter(event):
if re.search(r"(\s+|^)@([^\s]+)", event.message.text):
return True
else:
return False
| fr | 0.163581 | # !/usr/bin/env python | 2.052833 | 2 |
myDevices/devices/analog/mcp492X.py | wjl198435/Cayenne-Agent | 20 | 6612252 | # Copyright 2012-2013 <NAME> - trouch.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from myDevices.utils.types import toint
from myDevices.devices.spi import SPI
from myDevices.devices.analog import DAC
class MCP492X(SPI, DAC):
    """Driver for the Microchip MCP492x family of 12-bit SPI DACs."""

    def __init__(self, chip, channelCount, vref):
        """chip: SPI chip-select; channelCount: DAC channels; vref: reference voltage."""
        # SPI mode 0, 8-bit words, 10 MHz clock.
        SPI.__init__(self, toint(chip), 0, 8, 10000000)
        DAC.__init__(self, channelCount, 12, float(vref))
        self.buffered = False   # BUF bit: Vref input buffer disabled
        self.gain = False       # maps to /GA bit (active low): False -> 1x gain
        self.shutdown = False   # maps to /SHDN bit (active low): False -> output active
        # Cache of the last value written per channel (the chip is write-only).
        self.values = [0 for i in range(channelCount)]

    def __str__(self):
        return "MCP492%d(chip=%d)" % (self._analogCount, self.chip)

    def __analogRead__(self, channel, diff=False):
        """Return the cached value; the hardware provides no readback."""
        return self.values[channel]

    def __analogWrite__(self, channel, value):
        """Pack and send the 16-bit write command: A/B, BUF, /GA, /SHDN, D11..D0."""
        d = bytearray(2)
        d[0] = 0
        d[0] |= (channel & 0x01) << 7
        d[0] |= (self.buffered & 0x01) << 6
        # Parenthesized to state the intent: the flags are active low, so the
        # bit is ((not flag) & 1). The original ``not self.gain & 0x01`` parsed
        # as ``not (self.gain & 0x01)`` -- identical result for bool flags, but
        # wrong if the attribute were ever an int with higher bits set.
        d[0] |= ((not self.gain) & 0x01) << 5
        d[0] |= ((not self.shutdown) & 0x01) << 4
        d[0] |= (value >> 8) & 0x0F
        d[1] = value & 0xFF
        self.writeBytes(d)
        self.values[channel] = value
class MCP4921(MCP492X):
    """Single-channel (1x12-bit) member of the MCP492x family."""

    def __init__(self, chip=0, vref=3.3):
        # Delegate to the family driver with a fixed channel count of 1.
        super().__init__(chip, 1, vref)
class MCP4922(MCP492X):
    """Dual-channel (2x12-bit) member of the MCP492x family."""

    def __init__(self, chip=0, vref=3.3):
        # Delegate to the family driver with a fixed channel count of 2.
        super().__init__(chip, 2, vref)
| # Copyright 2012-2013 <NAME> - trouch.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from myDevices.utils.types import toint
from myDevices.devices.spi import SPI
from myDevices.devices.analog import DAC
class MCP492X(SPI, DAC):
def __init__(self, chip, channelCount, vref):
SPI.__init__(self, toint(chip), 0, 8, 10000000)
DAC.__init__(self, channelCount, 12, float(vref))
self.buffered=False
self.gain=False
self.shutdown=False
self.values = [0 for i in range(channelCount)]
def __str__(self):
return "MCP492%d(chip=%d)" % (self._analogCount, self.chip)
def __analogRead__(self, channel, diff=False):
return self.values[channel]
def __analogWrite__(self, channel, value):
d = bytearray(2)
d[0] = 0
d[0] |= (channel & 0x01) << 7
d[0] |= (self.buffered & 0x01) << 6
d[0] |= (not self.gain & 0x01) << 5
d[0] |= (not self.shutdown & 0x01) << 4
d[0] |= (value >> 8) & 0x0F
d[1] = value & 0xFF
self.writeBytes(d)
self.values[channel] = value
class MCP4921(MCP492X):
def __init__(self, chip=0, vref=3.3):
MCP492X.__init__(self, chip, 1, vref)
class MCP4922(MCP492X):
def __init__(self, chip=0, vref=3.3):
MCP492X.__init__(self, chip, 2, vref)
| en | 0.837713 | # Copyright 2012-2013 <NAME> - trouch.com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.385559 | 2 |
tests/test_cli.py | jacksmith15/jsonschema-lint | 3 | 6612253 | <reponame>jacksmith15/jsonschema-lint
import subprocess
from pathlib import Path
import pytest
ASSETS_DIR = Path(__file__).parent / "assets"
SIMPLE_DIR = ASSETS_DIR / "simple"
def test_it_uses_rc_file():
    """With no CLI arguments, targets are discovered via the rc file in SIMPLE_DIR."""
    result = subprocess.run(["jsonschema-lint"], cwd=SIMPLE_DIR, capture_output=True, text=True)
    output = "\n".join([result.stdout, result.stderr])
    # Violations were found, so the linter must exit non-zero.
    assert result.returncode == 1, output
    assert (
        result.stdout
        == """
object/instances/002.json:1:1:1:15: Additional properties are not allowed ('qux' was unexpected)
numbers/instances/002.json:1:2:1:8: 'spam' is not of type 'number'
numbers/instances/002.json:1:2:1:8: 'spam' is not one of [1, 2, 3]
numbers/instances/002.json:1:1:1:12: ['spam', 2] is too short
numbers/instances/002.yaml:2:3:2:7: 'spam' is not of type 'number'
numbers/instances/002.yaml:2:3:2:7: 'spam' is not one of [1, 2, 3]
numbers/instances/002.yaml:2:1:4:1: ['spam', 2] is too short
object/instances/002.yml:1:1:2:1: Additional properties are not allowed ('qux' was unexpected)
""".lstrip()
    )
def test_it_filters_on_provided_files():
    """Passing an explicit file restricts linting to that file only."""
    result = subprocess.run(
        ["jsonschema-lint", "numbers/instances/002.json"], cwd=SIMPLE_DIR, capture_output=True, text=True
    )
    output = "\n".join([result.stdout, result.stderr])
    assert result.returncode == 1, output
    assert (
        result.stdout
        == """
numbers/instances/002.json:1:2:1:8: 'spam' is not of type 'number'
numbers/instances/002.json:1:2:1:8: 'spam' is not one of [1, 2, 3]
numbers/instances/002.json:1:1:1:12: ['spam', 2] is too short
""".lstrip()
    )
def test_it_uses_specified_schema():
    """--schema overrides rc-file matching: every instance is validated
    against the numbers schema, so the object instances now fail too."""
    result = subprocess.run(
        ["jsonschema-lint", "--schema", "numbers/schema.json", *SIMPLE_DIR.glob("**/instances/**/*.json")],
        cwd=SIMPLE_DIR,
        capture_output=True,
        text=True,
    )
    output = "\n".join([result.stdout, result.stderr])
    assert result.returncode == 1, output
    assert (
        result.stdout
        == """
object/instances/001.json:1:1:1:15: {'foo': 'bar'} is not of type 'array'
object/instances/002.json:1:1:1:15: {'qux': 'mux'} is not of type 'array'
numbers/instances/002.json:1:2:1:8: 'spam' is not of type 'number'
numbers/instances/002.json:1:2:1:8: 'spam' is not one of [1, 2, 3]
numbers/instances/002.json:1:1:1:12: ['spam', 2] is too short
""".lstrip()
    )
def test_it_uses_yaml_schema():
    """Schemas written in YAML are loaded and applied like JSON ones."""
    result = subprocess.run(
        ["jsonschema-lint"], cwd=ASSETS_DIR / "extra" / "yaml-schema", capture_output=True, text=True
    )
    output = "\n".join([result.stdout, result.stderr])
    assert result.returncode == 1, output
    assert (
        result.stdout
        == """
instances/002.json:3:16:3:17: 1 is not of type 'string'
""".lstrip()
    )
def test_it_uses_remote_schema(mock_remote_schema_server):
    """Schemas referenced by URL are fetched from the fixture's local HTTP server."""
    result = subprocess.run(
        ["jsonschema-lint"], cwd=ASSETS_DIR / "extra" / "remote-schema", capture_output=True, text=True
    )
    output = "\n".join([result.stdout, result.stderr])
    assert result.returncode == 1, output
    assert (
        result.stdout
        == """
instances/002.json:1:12:1:13: 1 is not of type 'string'
""".lstrip()
    )
@pytest.fixture()
def mock_remote_schema_server():
    """Serve the remote-schema assets over local HTTP for the test's duration."""
    process = subprocess.Popen(
        ["python", "-m", "http.server"],
        cwd=ASSETS_DIR / "extra" / "remote-schema" / "server",
        text=True,
    )
    try:
        yield
    finally:
        exit_code = process.poll()
        # Terminate BEFORE asserting so a failed assertion cannot leak the
        # server process; wait() reaps it and avoids a zombie.
        process.terminate()
        process.wait()
        # BUGFIX: stdout/stderr were not captured (no PIPE), so they are None;
        # the old "\n".join([process.stdout, process.stderr]) message raised
        # TypeError instead of reporting the failure.
        assert not exit_code, f"http.server exited early with code {exit_code}"
| import subprocess
from pathlib import Path
import pytest
ASSETS_DIR = Path(__file__).parent / "assets"
SIMPLE_DIR = ASSETS_DIR / "simple"
def test_it_uses_rc_file():
result = subprocess.run(["jsonschema-lint"], cwd=SIMPLE_DIR, capture_output=True, text=True)
output = "\n".join([result.stdout, result.stderr])
assert result.returncode == 1, output
assert (
result.stdout
== """
object/instances/002.json:1:1:1:15: Additional properties are not allowed ('qux' was unexpected)
numbers/instances/002.json:1:2:1:8: 'spam' is not of type 'number'
numbers/instances/002.json:1:2:1:8: 'spam' is not one of [1, 2, 3]
numbers/instances/002.json:1:1:1:12: ['spam', 2] is too short
numbers/instances/002.yaml:2:3:2:7: 'spam' is not of type 'number'
numbers/instances/002.yaml:2:3:2:7: 'spam' is not one of [1, 2, 3]
numbers/instances/002.yaml:2:1:4:1: ['spam', 2] is too short
object/instances/002.yml:1:1:2:1: Additional properties are not allowed ('qux' was unexpected)
""".lstrip()
)
def test_it_filters_on_provided_files():
result = subprocess.run(
["jsonschema-lint", "numbers/instances/002.json"], cwd=SIMPLE_DIR, capture_output=True, text=True
)
output = "\n".join([result.stdout, result.stderr])
assert result.returncode == 1, output
assert (
result.stdout
== """
numbers/instances/002.json:1:2:1:8: 'spam' is not of type 'number'
numbers/instances/002.json:1:2:1:8: 'spam' is not one of [1, 2, 3]
numbers/instances/002.json:1:1:1:12: ['spam', 2] is too short
""".lstrip()
)
def test_it_uses_specified_schema():
result = subprocess.run(
["jsonschema-lint", "--schema", "numbers/schema.json", *SIMPLE_DIR.glob("**/instances/**/*.json")],
cwd=SIMPLE_DIR,
capture_output=True,
text=True,
)
output = "\n".join([result.stdout, result.stderr])
assert result.returncode == 1, output
assert (
result.stdout
== """
object/instances/001.json:1:1:1:15: {'foo': 'bar'} is not of type 'array'
object/instances/002.json:1:1:1:15: {'qux': 'mux'} is not of type 'array'
numbers/instances/002.json:1:2:1:8: 'spam' is not of type 'number'
numbers/instances/002.json:1:2:1:8: 'spam' is not one of [1, 2, 3]
numbers/instances/002.json:1:1:1:12: ['spam', 2] is too short
""".lstrip()
)
def test_it_uses_yaml_schema():
result = subprocess.run(
["jsonschema-lint"], cwd=ASSETS_DIR / "extra" / "yaml-schema", capture_output=True, text=True
)
output = "\n".join([result.stdout, result.stderr])
assert result.returncode == 1, output
assert (
result.stdout
== """
instances/002.json:3:16:3:17: 1 is not of type 'string'
""".lstrip()
)
def test_it_uses_remote_schema(mock_remote_schema_server):
result = subprocess.run(
["jsonschema-lint"], cwd=ASSETS_DIR / "extra" / "remote-schema", capture_output=True, text=True
)
output = "\n".join([result.stdout, result.stderr])
assert result.returncode == 1, output
assert (
result.stdout
== """
instances/002.json:1:12:1:13: 1 is not of type 'string'
""".lstrip()
)
@pytest.fixture()
def mock_remote_schema_server():
process = subprocess.Popen(
["python", "-m", "http.server"],
cwd=ASSETS_DIR / "extra" / "remote-schema" / "server",
text=True,
)
try:
yield
finally:
exit_code = process.poll()
assert not exit_code, "\n".join([process.stdout, process.stderr])
process.terminate() | en | 0.694947 | object/instances/002.json:1:1:1:15: Additional properties are not allowed ('qux' was unexpected) numbers/instances/002.json:1:2:1:8: 'spam' is not of type 'number' numbers/instances/002.json:1:2:1:8: 'spam' is not one of [1, 2, 3] numbers/instances/002.json:1:1:1:12: ['spam', 2] is too short numbers/instances/002.yaml:2:3:2:7: 'spam' is not of type 'number' numbers/instances/002.yaml:2:3:2:7: 'spam' is not one of [1, 2, 3] numbers/instances/002.yaml:2:1:4:1: ['spam', 2] is too short object/instances/002.yml:1:1:2:1: Additional properties are not allowed ('qux' was unexpected) numbers/instances/002.json:1:2:1:8: 'spam' is not of type 'number' numbers/instances/002.json:1:2:1:8: 'spam' is not one of [1, 2, 3] numbers/instances/002.json:1:1:1:12: ['spam', 2] is too short object/instances/001.json:1:1:1:15: {'foo': 'bar'} is not of type 'array' object/instances/002.json:1:1:1:15: {'qux': 'mux'} is not of type 'array' numbers/instances/002.json:1:2:1:8: 'spam' is not of type 'number' numbers/instances/002.json:1:2:1:8: 'spam' is not one of [1, 2, 3] numbers/instances/002.json:1:1:1:12: ['spam', 2] is too short instances/002.json:3:16:3:17: 1 is not of type 'string' instances/002.json:1:12:1:13: 1 is not of type 'string' | 2.162162 | 2 |
src/spectrum_functions_test.py | shmouses/SpectrumImageAnalysisPy | 3 | 6612254 | <gh_stars>1-10
import spectrum_functions
import unittest
import numpy as np
class SpectrumFunctionsTest(unittest.TestCase):
    """Unit tests for spectrum_functions: check_spectrum, slice_range,
    normalize and find_fw."""

    def test2DSpecX(self):
        '''an error is raised if check_spectrum is given a 2D x input'''
        x = np.arange(10).reshape((2, 5))
        y = np.arange(10)
        with self.assertRaisesRegex(ValueError,
                                    'x is not a 1 dimensional array'):
            spectrum_functions.check_spectrum(x, y)

    def test2DSpecY(self):
        '''an error is raised if check_spectrum is given a 2D y input'''
        x = np.arange(10)
        y = np.arange(10).reshape((2, 5))
        with self.assertRaisesRegex(ValueError,
                                    'y is not a 1 dimensional array'):
            spectrum_functions.check_spectrum(x, y)

    def testXYlenDifferent(self):
        '''an error is raised if x and y are not the same length as each
        other'''
        x = np.arange(10)
        y = np.arange(9)
        with self.assertRaisesRegex(ValueError,
                                    'x and y are not the same length'):
            spectrum_functions.check_spectrum(x, y)

    def testSliceRange(self):
        '''slice_range works properly in normal use case'''
        x = np.arange(10)
        y = np.arange(10)
        start_stop = [5, 8]
        self.assertTrue(np.array_equal(
            np.array([5, 6, 7, 8]),
            spectrum_functions.slice_range(x, start_stop, y)
        ))

    def testSliceRangeYCal(self):
        '''slice_range works properly with y non-integer values'''
        x = np.arange(10)
        y = np.arange(10)/10.
        start_stop = [0.3, 0.7]
        self.assertTrue(np.array_equal(
            np.array([3, 4, 5, 6, 7]),
            spectrum_functions.slice_range(x, start_stop, y)
        ))

    def testSliceRangeStopOutside(self):
        '''slice_range works properly when stop is greater than max(y)'''
        x = np.arange(10)
        y = np.arange(10)
        start_stop = [5, 12]
        self.assertTrue(np.array_equal(
            np.array([5, 6, 7, 8, 9]),
            spectrum_functions.slice_range(x, start_stop, y)
        ))

    def testSliceRangeStartOutside(self):
        # Docstring fixed: this test exercises start below min(y), not stop above max(y).
        '''slice_range works properly when start is less than min(y)'''
        x = np.arange(10)
        y = np.arange(10)
        start_stop = [-2, 5]
        self.assertTrue(np.array_equal(
            np.array([0, 1, 2, 3, 4, 5]),
            spectrum_functions.slice_range(x, start_stop, y)
        ))

    def testSliceRangeYNegative(self):
        '''slice_range works properly when y contains negative values'''
        x = np.arange(10)
        y = np.arange(10) - 5
        start_stop = [-7, 0]
        self.assertTrue(np.array_equal(
            np.array([0, 1, 2, 3, 4, 5]),
            spectrum_functions.slice_range(x, start_stop, y)
        ))

    def testSliceRangeStartStopDims(self):
        '''raise error in slice_range if start_stop are not a 2 element list or
        tuple or array'''
        x = np.arange(10)
        y = np.arange(10) - 5
        start_stop = [-7, 0, 5]
        with self.assertRaisesRegex(ValueError,
                                    'start_stop is not a 2 element list'):
            spectrum_functions.slice_range(x, start_stop, y)

    def testSliceRangeYDecreasing(self):
        '''slice_range works when y is monotonically decreasing'''
        x = np.arange(10)
        y = np.flip(np.arange(10))
        start_stop = [5, 2]
        self.assertTrue(np.array_equal(
            np.array([4, 5, 6, 7]),
            spectrum_functions.slice_range(x, start_stop, y)
        ))

    def testNormalize1Index(self):
        '''Normalize works as expected when a single index is given'''
        x = np.arange(10)
        ind = 2
        np.testing.assert_allclose(
            x/2.,
            spectrum_functions.normalize(x, ind)
        )

    def testNormalizeFloatIndex(self):
        '''Normalize throws an error when given a float index'''
        x = np.arange(10)
        ind = 2.4
        with self.assertRaises(ValueError):
            spectrum_functions.normalize(x, ind)

    def testNormalize1IndexTuple(self):
        '''Normalize throws an error if a single index inside a sequence
        is given'''
        x = np.arange(10)
        ind = [3]
        with self.assertRaises(ValueError):
            spectrum_functions.normalize(x, ind)

    def testNormalize2Indices(self):
        '''Normalize works as expected when two indices are given'''
        x = np.arange(10)
        ind = (2, 5)
        np.testing.assert_allclose(
            x/9.,
            spectrum_functions.normalize(x, ind)
        )

    def testNormalizeMoreIndices(self):
        '''Normalize raises an error if more than two indices are passed as
        input'''
        x = np.arange(10)
        ind = (2, 5, 3)
        with self.assertRaises(ValueError):
            spectrum_functions.normalize(x, ind)

    # The find_fw tests below had an unused ``x = np.arange(7)`` local each;
    # find_fw is only called with y plus index/fraction arguments, so the
    # dead assignments are removed.

    def testFindFWHMInt(self):
        '''
        find_fw finds the right fw given a simple function
        '''
        y = np.array([1, 1, 2, 4, 2, 1, 1])
        fwhm = 2.
        self.assertEqual(fwhm, spectrum_functions.find_fw(y, 1, 3, 0.5))

    def testFindFWHMDecimal(self):
        '''
        find_fw finds the right fw given a simple function, answer is a fraction of the dispersion
        '''
        y = np.array([1, 1, 2, 5, 2, 1, 1])
        fwhm = 5/3.
        np.testing.assert_almost_equal(spectrum_functions.find_fw(y, 1, 3, 0.5), fwhm)

    def testFindFWAsymmetrical(self):
        '''
        find_fw finds the right fw given an asymmetrical function
        '''
        y = np.array([1, 1, 3, 5, 2, 1, 1])
        fwhm = 2.5 / 3 + 1 + 0.25
        self.assertEqual(fwhm, spectrum_functions.find_fw(y, 1, 3, 0.5))

    def testFindFWAsymmetricalRight(self):
        '''
        find_fw finds the right fw given an asymmetrical function, higher on the right side
        '''
        y = np.array([1, 1, 2, 5, 3, 1, 1])
        fwhm = 2.5 / 3 + 1 + 0.25
        self.assertEqual(fwhm, spectrum_functions.find_fw(y, 1, 3, 0.5))
if __name__ == '__main__':
unittest.main()
| import spectrum_functions
import unittest
import numpy as np
class SpectrumFunctionsTest(unittest.TestCase):
def test2DSpecX(self):
'''an error is raised if check_spectrum is given a 2D x input'''
x = np.arange(10).reshape((2, 5))
y = np.arange(10)
with self.assertRaisesRegex(ValueError,
'x is not a 1 dimensional array'):
spectrum_functions.check_spectrum(x, y)
def test2DSpecY(self):
'''an error is raised if check_spectrum is given a 2D y input'''
x = np.arange(10)
y = np.arange(10).reshape((2, 5))
with self.assertRaisesRegex(ValueError,
'y is not a 1 dimensional array'):
spectrum_functions.check_spectrum(x, y)
def testXYlenDifferent(self):
'''an error is raised if x and y are not the same length as each
other'''
x = np.arange(10)
y = np.arange(9)
with self.assertRaisesRegex(ValueError,
'x and y are not the same length'):
spectrum_functions.check_spectrum(x, y)
def testSliceRange(self):
'''slice_range works properly in normal use case'''
x = np.arange(10)
y = np.arange(10)
start_stop = [5, 8]
self.assertTrue(np.array_equal(
np.array([5, 6, 7, 8]),
spectrum_functions.slice_range(x, start_stop, y)
))
def testSliceRangeYCal(self):
'''slice_range works properly with y non-integer values'''
x = np.arange(10)
y = np.arange(10)/10.
start_stop = [0.3, 0.7]
self.assertTrue(np.array_equal(
np.array([3, 4, 5, 6, 7]),
spectrum_functions.slice_range(x, start_stop, y)
))
def testSliceRangeStopOutside(self):
'''slice_range works properly when stop is greater than max(y)'''
x = np.arange(10)
y = np.arange(10)
start_stop = [5, 12]
self.assertTrue(np.array_equal(
np.array([5, 6, 7, 8, 9]),
spectrum_functions.slice_range(x, start_stop, y)
))
def testSliceRangeStartOutside(self):
'''slice_range works properly when stop is greater than max(y)'''
x = np.arange(10)
y = np.arange(10)
start_stop = [-2, 5]
self.assertTrue(np.array_equal(
np.array([0, 1, 2, 3, 4, 5]),
spectrum_functions.slice_range(x, start_stop, y)
))
def testSliceRangeYNegative(self):
'''slice_range works properly when y contains negative values'''
x = np.arange(10)
y = np.arange(10) - 5
start_stop = [-7, 0]
self.assertTrue(np.array_equal(
np.array([0, 1, 2, 3, 4, 5]),
spectrum_functions.slice_range(x, start_stop, y)
))
def testSliceRangeStartStopDims(self):
'''raise error in slice_range if start_stop are not a 2 element list or
tuple or array'''
x = np.arange(10)
y = np.arange(10) - 5
start_stop = [-7, 0, 5]
with self.assertRaisesRegex(ValueError,
'start_stop is not a 2 element list'):
spectrum_functions.slice_range(x, start_stop, y)
def testSliceRangeYDecreasing(self):
'''slice_range works when y is monotonically decreasing'''
x = np.arange(10)
y = np.flip(np.arange(10))
start_stop = [5, 2]
self.assertTrue(np.array_equal(
np.array([4, 5, 6, 7]),
spectrum_functions.slice_range(x, start_stop, y)
))
def testNormalize1Index(self):
'''Normalize works as expected when a single index is given'''
x = np.arange(10)
ind = 2
np.testing.assert_allclose(
x/2.,
spectrum_functions.normalize(x, ind)
)
def testNormalizeFloatIndex(self):
'''Normalize throws an error when given a float index'''
x = np.arange(10)
ind = 2.4
with self.assertRaises(ValueError):
spectrum_functions.normalize(x, ind)
def testNormalize1IndexTuple(self):
'''Normalize throws an error if a single index inside a sequence
is given'''
x = np.arange(10)
ind = [3]
with self.assertRaises(ValueError):
spectrum_functions.normalize(x, ind)
def testNormalize2Indices(self):
'''Normalize works as expected when two indices are given'''
x = np.arange(10)
ind = (2, 5)
np.testing.assert_allclose(
x/9.,
spectrum_functions.normalize(x, ind)
)
def testNormalizeMoreIndices(self):
'''Normalize raises an error if more than two indices are passed as
input'''
x = np.arange(10)
ind = (2, 5, 3)
with self.assertRaises(ValueError):
spectrum_functions.normalize(x, ind)
def testFindFWHMInt(self):
'''
find_fw finds the right fw given a simple function
'''
y = np.array([1, 1, 2, 4, 2, 1, 1])
x = np.arange(7)
fwhm = 2.
self.assertEqual(fwhm, spectrum_functions.find_fw(y, 1, 3, 0.5))
def testFindFWHMDecimal(self):
'''
find_fw finds the right fw given a simple function, answer is a fraction of the dispersion
'''
y = np.array([1, 1, 2, 5, 2, 1, 1])
x = np.arange(7)
fwhm = 5/3.
np.testing.assert_almost_equal(spectrum_functions.find_fw(y, 1, 3, 0.5), fwhm)
def testFindFWAsymmetrical(self):
'''
find_fw finds the right fw given an asymmetrical function
'''
y = np.array([1, 1, 3, 5, 2, 1, 1])
x = np.arange(7)
fwhm = 2.5 / 3 + 1 + 0.25
self.assertEqual(fwhm, spectrum_functions.find_fw(y, 1, 3, 0.5))
def testFindFWAsymmetricalRight(self):
'''
find_fw finds the right fw given an asymmetrical function, higher on the right side
'''
y = np.array([1, 1, 2, 5, 3, 1, 1])
x = np.arange(7)
fwhm = 2.5 / 3 + 1 + 0.25
self.assertEqual(fwhm, spectrum_functions.find_fw(y, 1, 3, 0.5))
if __name__ == '__main__':
unittest.main() | en | 0.710395 | an error is raised if check_spectrum is given a 2D x input an error is raised if check_spectrum is given a 2D y input an error is raised if x and y are not the same length as each other slice_range works properly in normal use case slice_range works properly with y non-integer values slice_range works properly when stop is greater than max(y) slice_range works properly when stop is greater than max(y) slice_range works properly when y contains negative values raise error in slice_range if start_stop are not a 2 element list or tuple or array slice_range works when y is monotonically decreasing Normalize works as expected when a single index is given Normalize throws an error when given a float index Normalize throws an error if a single index inside a sequence is given Normalize works as expected when two indices are given Normalize raises an error if more than two indices are passed as input find_fw finds the right fw given a simple function find_fw finds the right fw given a simple function, answer is a fraction of the dispersion find_fw finds the right fw given an asymmetrical function find_fw finds the right fw given an asymmetrical function, higher on the right side | 2.944675 | 3 |
django_modals/modals.py | jonesim/django-modals | 8 | 6612255 | import base64
import json
import inspect
from django.forms.fields import Field
from django.forms.models import modelform_factory, fields_for_model
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic.base import TemplateResponseMixin, TemplateView
from django.views.generic.edit import BaseFormView
from django.views.generic.detail import SingleObjectMixin
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.decorators import method_decorator
from ajax_helpers.mixins import AjaxHelpers
from . import processes
from .forms import ModelCrispyForm
from .helper import render_modal, modal_button, modal_button_group, ajax_modal_redirect, modal_button_method, \
ajax_modal_replace
class ModalException(Exception):
pass
@method_decorator(ensure_csrf_cookie, name='dispatch')
class BaseModalMixin(AjaxHelpers):
    """Shared plumbing for modal views: slug/base64 URL parsing, template
    context, button groups, and ajax helpers to replace or redirect modals.
    """

    kwargs: dict
    button_group_class = None
    button_container_class = None
    menu_config = {'href_format': "javascript:django_modal.show_modal('{}')"}
    ajax_commands = ['button', 'select2', 'ajax']
    button_group_css = None
    size = 'lg'
    # Template used when the modal URL is opened directly (no ajax parent page).
    no_parent_template = 'django_modals/blank_page_form.html'

    def __init__(self):
        super().__init__()
        # Subclasses may pre-set modal_mode; default to rendering as a modal.
        if not hasattr(self, 'modal_mode'):
            self.modal_mode = True
        self.slug = {}

    def get_context_data(self, **kwargs):
        """Build the template context; kwargs override per-class defaults."""
        # noinspection PyUnresolvedReferences
        context = super().get_context_data(**kwargs) if hasattr(super(), 'get_context_data') else {}
        context.update({'request': self.request, 'slug': self.slug})
        context['modal_url'] = kwargs.get('modal_url', self.request.get_full_path())
        context['no_header_x'] = getattr(self, 'no_header_x', None)
        context['center_header'] = kwargs.get('center_header', getattr(self, 'center_header', None))
        context['size'] = kwargs.get('size', self.size)
        context['modal_type'] = self.kwargs.get('modal_type')
        return context

    def split_slug(self, kwargs):
        """Parse the URL slug into self.slug.

        Formats: '-' (empty), a bare pk, or 'key1-value1-key2-value2...'.
        A parsed 'pk' is also copied into self.kwargs for SingleObjectMixin.
        """
        if 'slug' in kwargs and kwargs['slug'] != '-':
            s = kwargs['slug'].split('-')
            if len(s) == 1:
                self.slug['pk'] = s[0]
            else:
                self.slug.update({s[k]: s[k+1] for k in range(0, int(len(s)-1), 2)})
            if 'pk' in self.slug:
                self.kwargs['pk'] = self.slug['pk']

    def process_slug_kwargs(self):
        # Hook for permission/validation checks; subclasses return False to reject.
        return True

    def split_base64(self, kwargs):
        """Merge an optional urlsafe-base64 JSON payload into self.slug.

        Non-dict payloads are wrapped under the 'base64' key.
        """
        if 'base64' in kwargs:
            base64_data = json.loads(base64.urlsafe_b64decode(self.kwargs['base64']))
            if not isinstance(base64_data, dict):
                base64_data = {'base64': base64_data}
            self.slug.update(base64_data)

    def dispatch(self, request, *args, **kwargs):
        """Parse slug/base64 args, then dispatch; raise if permission denied."""
        self.split_slug(kwargs)
        self.split_base64(kwargs)
        if self.process_slug_kwargs():
            # noinspection PyUnresolvedReferences
            return super().dispatch(request, *args, **self.kwargs)
        else:
            raise ModalException('User does not have permission')

    def button_refresh_modal(self, **_kwargs):
        # Ajax button command: re-render this modal in place.
        return self.command_response(ajax_modal_replace(self.request, modal_class=self.__class__,
                                                        slug=self.kwargs.get('slug', '-')))

    def button_group(self):
        """Render self.buttons as a button group; URL kwargs override CSS classes."""
        button_kwargs = {
            'button_group_class': self.kwargs.get('button_group_class', self.button_group_class),
            'button_container_class': self.kwargs.get('button_container_class', self.button_container_class)
        }
        button_kwargs = {k: v for k, v in button_kwargs.items() if v}
        return modal_button_group(self.buttons, **button_kwargs)

    def check_for_background_page(self, context):
        """When the modal URL is hit directly (not via ajax), wrap the rendered
        modal in a stand-alone page template.

        NOTE(review): request.is_ajax() was deprecated in Django 3.1 and
        removed in 4.0 -- confirm the supported Django versions.
        """
        if not self.request.is_ajax() and self.modal_mode:
            context['modal_type'] = 'no-parent'
            context['no_header_x'] = True
            context['form'] = render_modal(template_name=self.template_name, **context)
            # noinspection PyAttributeOutsideInit
            self.template_name = self.no_parent_template

    def modal_replace(self, modal_name=None, modal_class=None, slug='-', **kwargs):
        """Ajax response that swaps the current modal for another one."""
        return self.command_response(ajax_modal_replace(self.request, modal_name, slug=slug,
                                                        modal_class=modal_class, **kwargs))

    def message(self, message, title=None, **modal_kwargs):
        """Replace the current modal with a simple message modal."""
        if title is not None:
            modal_kwargs['modal_title'] = title
        return self.modal_replace(modal_class=Modal, message=message, ajax_function='modal_html', **modal_kwargs)

    def confirm(self, message, title=None, button_group_type='confirm', **kwargs):
        """Message modal with confirm-style buttons."""
        return self.message(message, title=title, button_group_type=button_group_type, **kwargs)

    def modal_redirect(self, modal_name, slug='-'):
        """Ajax response that closes this modal and opens a named one."""
        return self.command_response(ajax_modal_redirect(modal_name, slug))
class BaseModal(BaseModalMixin, TemplateView):
    """Plain template-backed modal view."""
    template_name = 'django_modals/modal_base.html'
    def get_context_data(self, **kwargs):
        """Add the header title and handle non-ajax (full page) rendering."""
        ctx = super().get_context_data(**kwargs)
        if 'modal_title' in kwargs:
            ctx['header_title'] = kwargs['modal_title']
        else:
            ctx['header_title'] = getattr(self, 'modal_title', None)
        self.check_for_background_page(ctx)
        return ctx
class Modal(BaseModal):
    """Message modal with a configurable button group."""
    def __init__(self):
        if not hasattr(self, 'buttons'):
            self.buttons = []
        self._extra_content = None  # lazy cache used by extra_context
        super().__init__()
    def modal_content(self):
        """Return the HTML body of the modal; defaults to the 'message' kwarg."""
        return self.kwargs.get('message', '')
    def get_modal_buttons(self):
        """Return the button list, honouring an explicit 'buttons' kwarg."""
        if 'buttons' in self.kwargs:
            return self.kwargs['buttons']
        group_type = self.kwargs.get('button_group_type')
        confirm_method = self.kwargs.get('button_function', 'confirm')
        if group_type == 'confirm':
            buttons = [
                modal_button_method('Confirm', confirm_method, 'btn-success'),
                modal_button('Cancel', 'close', 'btn-secondary'),
            ]
        elif group_type == 'yes_cancel':
            buttons = [
                modal_button_method('Yes', confirm_method, 'btn-danger'),
                modal_button('Cancel', 'close', 'btn-success'),
            ]
        else:
            buttons = [modal_button('OK', 'close', 'btn-success')]
        return buttons
    @property
    def extra_context(self):
        # Build (once) the rendered content plus button group for the template.
        if not self._extra_content:
            content = self.modal_content()
            if not self.buttons:
                self.buttons = self.get_modal_buttons()
            self._extra_content = {'form': mark_safe(content + self.button_group())}
        return self._extra_content
class TemplateModal(Modal):
    """Modal whose body is rendered from a dedicated template."""
    modal_template = None
    def __init__(self, modal_template=None, modal_title=None, size=None, **kwargs):
        # These kwargs will be overwritten if called as_view()
        self.kwargs = kwargs
        for attr, value in (('size', size),
                            ('modal_title', modal_title),
                            ('modal_template', modal_template)):
            if value:
                setattr(self, attr, value)
        super().__init__()
    def modal_context(self):
        """Context for modal_template; defaults to the 'context' kwarg."""
        return self.kwargs.get('context', {})
    def modal_content(self):
        return render_to_string(self.modal_template, self.modal_context())
    def modal_html(self, request):
        """Render the complete modal HTML for an explicit request."""
        self.request = request
        context = self.get_context_data()
        if 'message' in self.kwargs:
            context['message'] = self.kwargs['message']
        return render_to_string(self.template_name, context)
class FormModalMixin(BaseModalMixin):
    """Form handling for modal views: validation round-trips via ajax commands."""
    template_name = 'django_modals/modal_base.html'
    def form_invalid(self, form):
        """Re-render the invalid form; with ?formonly=1 return raw form HTML."""
        if self.request.GET.get('formonly', False):
            form = self.get_form()
            return HttpResponse(str(form))
        return self.refresh_form(form)
    def post_save(self, created):
        # Hook called after a successful save; created is True for new objects.
        pass
    def form_valid(self, form):
        """Save the form (if it has a save method) and close/reload the page."""
        # Record whether an object existed before saving so post_save can tell
        # a create from an update.
        org_id = self.object.id if hasattr(self, 'object') else None
        save_function = getattr(form, 'save', None)
        if save_function:
            save_function()
        self.post_save(created=org_id is None)
        # Default behaviour is a full page reload unless a command was queued.
        if not self.response_commands:
            self.add_command('reload')
        return self.command_response()
    def refresh_form(self, form):
        """Replace the form in the DOM and fire the modal refresh trigger."""
        self.add_command('html', selector=f'#{form.helper.form_id}', parent=True, html=str(form))
        return self.command_response('modal_refresh_trigger', selector=f'#{form.helper.form_id}')
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['css'] = 'modal'
        if context['form']:
            context['header_title'] = context['form'].get_title()
        else:
            # No form — the caller must have supplied a message instead.
            context['form'] = kwargs['message']
        self.check_for_background_page(context)
        return context
    def __init__(self, *args, **kwargs):
        # 'process' (create/edit/delete mode) may be set by a subclass first.
        if not hasattr(self, 'process'):
            self.process = None
        # noinspection PyArgumentList
        super().__init__(*args, **kwargs)
    def button_make_edit(self, **_kwargs):
        """Switch a read-only modal into edit/delete mode and re-render it.

        Mutates self.request (method/path) so the re-render below behaves as a
        fresh GET of the edit-mode URL — statement order matters here.
        """
        self.slug['modal'] = 'editdelete'
        new_slug = '-'.join([f'{k}-{v}' for k, v in self.slug.items()])
        self.request.method = 'GET'
        self.process = processes.PROCESS_EDIT_DELETE
        self.request.path = reverse(self.request.resolver_match.url_name, kwargs={'slug': new_slug})
        return self.command_response('overwrite_modal',
                                     html=render_to_string(self.template_name, self.get_context_data()))
    def button_refresh_modal(self, **kwargs):
        """Refresh just the form, or the whole modal when read-only/requested."""
        if self.slug.get('readonly') or kwargs.get('whole_modal'):
            return super().button_refresh_modal()
        else:
            form = self.get_form()
            form.clear_errors()
            return self.form_invalid(form)
    def get_form_kwargs(self):
        """Extend the standard form kwargs with modal/crispy-form extras."""
        kwargs = super().get_form_kwargs()
        kwargs['request_user'] = self.request.user
        kwargs['no_buttons'] = self.request.GET.get('no_buttons')
        # Optional per-view hooks forwarded to the form when defined.
        if hasattr(self, 'form_setup') and callable(self.form_setup):
            kwargs['form_setup'] = self.form_setup
        if hasattr(self, 'clean') and callable(self.clean):
            kwargs['clean'] = self.clean
        kwargs.update({k: getattr(self, k, None) for k in ['modal_title', 'slug']})
        if hasattr(self, 'helper_class'):
            kwargs['helper_class'] = self.helper_class
        kwargs['process'] = self.process
        return kwargs
class FormModal(FormModalMixin, TemplateResponseMixin, BaseFormView):
    """Concrete modal form view: FormModalMixin applied to Django's BaseFormView."""
    pass
class ProcessFormFields:
    """Normalise a form field specification list.

    Each entry in *form_fields* is either a plain field name or a
    ``(name, options)`` pair.  Recognised options are routed into the dicts
    that ``modelform_factory`` understands (widgets, labels, help_texts,
    error_messages) plus ``layout_field_classes``; any remaining options are
    kept per field in ``layout_field_params`` for the form's layout code.
    """
    # option key -> attribute dict that collects it
    _OPTION_TARGETS = {
        'widget': 'widgets',
        'label': 'labels',
        'help_text': 'help_texts',
        'error_messages': 'error_messages',
        'layout_field_class': 'layout_field_classes',
    }
    def __init__(self, form_fields, widgets=None, field_classes=None, labels=None, help_texts=None,
                 error_messages=None):
        self.fields = []
        self.widgets = widgets if widgets else {}
        self.labels = labels if labels else {}
        self.help_texts = help_texts if help_texts else {}
        self.error_messages = error_messages if error_messages else {}
        self.field_classes = field_classes if field_classes else {}
        self.layout_field_classes = {}
        self.layout_field_params = {}
        for f in form_fields:
            # isinstance (not type ==) so tuple subclasses such as namedtuples
            # are accepted as (name, options) pairs too.
            if isinstance(f, tuple):
                name, options = f[0], dict(f[1])
                self.fields.append(name)
                for key, target in self._OPTION_TARGETS.items():
                    if key in options:
                        getattr(self, target)[name] = options.pop(key)
                if options:
                    # Unrecognised keys become layout parameters for this field.
                    self.layout_field_params[name] = options
            else:
                self.fields.append(f)
    def form_init_kwargs(self):
        """Kwargs destined for the form's __init__ (layout information only)."""
        return {f: getattr(self, f) for f in ['layout_field_classes', 'layout_field_params'] if getattr(self, f, None)}
    def extra_kwargs(self):
        """Non-empty kwargs destined for modelform_factory."""
        return {f: getattr(self, f) for f in ['widgets', 'field_classes', 'labels', 'help_texts',
                                              'error_messages'] if getattr(self, f, None)}
class ModelFormModal(SingleObjectMixin, FormModal):
    """Modal form view bound to a model instance (create/view/edit/delete).

    The form class is generated lazily from ``form_fields`` via
    ``modelform_factory``; which mode the modal runs in ('process') is
    resolved from the URL slug and per-mode permission attributes.
    """
    form_fields = []
    template_name = 'django_modals/modal_base.html'
    base_form = ModelCrispyForm
    delete_message = 'Are you sure you want to delete?'
    delete_title = 'Warning'
    field_classes = None
    # Default permissions per mode; see processes.process_data / user_has_perm.
    permission_delete = processes.PERMISSION_DISABLE
    permission_edit = processes.PERMISSION_OFF
    permission_view = processes.PERMISSION_OFF
    permission_create = processes.PERMISSION_OFF
    @staticmethod
    def formfield_callback(f, **kwargs):
        """modelform_factory hook: allow field *instances* or classes as overrides."""
        form_class = kwargs.get('form_class')
        if isinstance(form_class, Field):
            # Already-instantiated field: give it a chance to configure itself
            # from the model field, then use it as-is.
            if hasattr(form_class, 'field_setup'):
                # noinspection PyCallingNonCallable
                form_class.field_setup(f)
            return form_class
        elif form_class:
            return form_class(**kwargs)
        return f.formfield(**kwargs)
    def get_form_class(self):
        """Build (once) and cache the model form class from form_fields."""
        if not self.form_class:
            processed_form_fields = ProcessFormFields(self.form_fields, widgets=getattr(self, 'widgets', None),
                                                      field_classes=getattr(self, 'field_classes', None),
                                                      labels=getattr(self, 'labels', None),
                                                      help_texts=getattr(self, 'help_texts', None),
                                                      error_messages=getattr(self, 'error_messages', None))
            self.form_init_args = processed_form_fields.form_init_kwargs()
            self.form_class = modelform_factory(self.model, form=self.base_form, fields=processed_form_fields.fields,
                                                formfield_callback=self.formfield_callback,
                                                **processed_form_fields.extra_kwargs())
        return self.form_class
    def __init__(self, *args, **kwargs):
        self.form_init_args = {}  # filled by get_form_class()
        super().__init__(*args, **kwargs)
        self.object = None  # set in process_slug_kwargs()
    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        if hasattr(self, 'object'):
            kwargs.update({'instance': self.object})
        kwargs.update(self.form_init_args)
        return kwargs
    def object_delete(self):
        # Hook called after a successful delete.
        pass
    def button_confirm_delete(self, **_kwargs):
        """Delete the object (when the mode allows it) and reload the page."""
        if self.process in [processes.PROCESS_DELETE, processes.PROCESS_EDIT_DELETE]:
            self.object.delete()
        self.object_delete()
        if not self.response_commands:
            self.add_command('close', no_refresh=True)
            self.add_command('reload')
        return self.command_response()
    def button_delete(self, **_kwargs):
        """Show a yes/cancel confirmation before actually deleting."""
        return self.confirm(self.delete_message, self.delete_title, button_function='confirm_delete',
                            button_group_type='yes_cancel', size='md')
    @staticmethod
    def user_has_perm(cls_or_instance, user, process):
        """Evaluate the permission attribute for *process* against *user*.

        Works for both a class and an instance; PERMISSION_METHOD delegates to
        a user-defined ``permission`` callable.
        """
        permission_type = getattr(cls_or_instance, processes.process_data[process].class_attribute)
        if permission_type == processes.PERMISSION_METHOD:
            # If permission method is not a staticmethod and function is called by class rather than instance
            # send None instead of self
            if inspect.isclass(cls_or_instance) and len(inspect.signature(cls_or_instance.permission).parameters) == 3:
                permission = cls_or_instance.permission(None, user, process)
            else:
                permission = cls_or_instance.permission(user, process)
        elif permission_type == processes.PERMISSION_OFF:
            permission = True
        elif permission_type == processes.PERMISSION_DISABLE:
            permission = False
        elif permission_type == processes.PERMISSION_AUTHENTICATED:
            permission = user.is_authenticated
        elif permission_type == processes.PERMISSION_STAFF:
            permission = user.is_staff or user.is_superuser
        else:
            # Fall back to Django model permissions for this process.
            # noinspection PyProtectedMember
            perms = [f'{cls_or_instance.model._meta.app_label}.{p}_{cls_or_instance.model._meta.model_name}'
                     for p in processes.process_data[process].django_permission]
            permission = user.has_perms(perms)
        return permission
    def get_process(self, user, process):
        """Walk the process fallback chain until one is permitted (or none left)."""
        while True:
            permission = self.user_has_perm(self, user, process)
            if permission:
                break
            process = processes.process_data[process].fallback
            if not process:
                break
        return permission, process
    def get_model(self):
        # Hook: supply the model when neither self.model nor form_class define it.
        pass
    def get_queryset(self):
        """Allow the model to restrict visible rows via a query_filter hook."""
        query = super().get_queryset()
        if hasattr(self.model, 'query_filter'):
            return self.model.query_filter(query, self.request, modal=self)
        return query
    def process_slug_kwargs(self):
        """Resolve mode, model and object from the slug; return permission."""
        # Mode: no pk -> create; explicit 'modal' key -> mapped mode;
        # otherwise default to edit/delete.
        if 'pk' not in self.slug:
            self.process = processes.PROCESS_CREATE
        elif 'modal' in self.slug:
            self.process = processes.modal_url_type[self.slug['modal']]
        else:
            if self.process is None:
                self.process = processes.PROCESS_EDIT_DELETE
        if self.model is None:
            if self.form_class:
                self.model = self.form_class.get_model(self.slug)
            else:
                self.model = self.get_model()
        if 'pk' in self.kwargs:
            self.object = self.get_object()
        else:
            # Creating: start from a blank instance and apply slug values.
            self.object = self.model()
            # noinspection PyProtectedMember
            fields = self.model._meta.get_fields()
            field_dict = {}
            for f in fields:
                field_dict[f.name.lower()] = f
            for i in self.slug:
                # M2M values cannot be set on an unsaved instance — seed the
                # form's initial data instead.
                if i in field_dict and field_dict[i].many_to_many:
                    self.initial[i] = [self.slug[i]]
                else:
                    setattr(self.object, i, self.slug[i])
        has_perm, self.process = self.get_process(self.request.user, self.process)
        return has_perm
    def select2_ajax_search(self, page_len=10, filter_field=None, filter_search='istartswith', search=None, page=None,
                            extra_filter=None, **_kwargs):
        """Paginated JSON search endpoint backing select2 widgets.

        The target field name is recovered from the calling method's name
        (select2_<field>) via the call stack.
        """
        field_name = inspect.stack()[1][3][len('select2_'):]
        field = fields_for_model(self.model, field_classes=self.field_classes, fields=[field_name],
                                 formfield_callback=self.formfield_callback)[field_name]
        if filter_field and search:
            query_filter = {f'{filter_field}__{filter_search}': search}
        else:
            query_filter = {}
        if extra_filter:
            query_filter.update(extra_filter)
        if hasattr(field, 'model'):
            # noinspection PyUnresolvedReferences
            choices = field.model.objects.filter(**query_filter)
        else:
            choices = field.choices.queryset.filter(**query_filter)
        if page:
            # Fetch one extra row so 'more' can be derived below.
            choices = choices[page_len * (page - 1): page_len * page + 1]
        if hasattr(field, 'select_str'):
            # noinspection PyCallingNonCallable
            results = [{'id': str(c.id), 'text': field.select_str(c)} for c in choices[:page_len]]
        else:
            results = [{'id': str(c.id), 'text': str(c)} for c in choices[:page_len]]
        return JsonResponse({'results': results, 'pagination': {'more': len(choices) > len(results)}})
class MultiForm:
    """Specification of one sub-form inside a MultiFormModal."""
    def __init__(self, model, fields, form_id=None, initial=None, widgets=None, **kwargs):
        self.model = model
        self.fields = fields
        self.kwargs = kwargs
        self.form_id = form_id
        self.initial = initial or {}
        self.widgets = widgets or {}
    def make_form_id(self, used_ids):
        """Assign a unique id (derived from the model name) and record it."""
        if not self.form_id:
            candidate = self.model.__name__ + 'Form'
            if candidate in used_ids:
                # Append the first free numeric suffix.
                suffix = 1
                while f'{candidate}_{suffix}' in used_ids:
                    suffix += 1
                candidate = f'{candidate}_{suffix}'
            self.form_id = candidate
        used_ids.append(self.form_id)
    def get_kwargs(self):
        """Keyword arguments used to instantiate this sub-form."""
        return {'form_id': self.form_id, 'initial': self.initial, 'no_buttons': True,
                **self.kwargs}
class MultiFormModal(BaseModal):
    """Modal hosting several model forms submitted together as one JSON payload."""
    template_name = 'django_modals/multi_form.html'
    modal_title = ''
    base_form = ModelCrispyForm
    forms = []  # list of MultiForm specifications, supplied by subclasses
    menu_config = {'href_format': "javascript:django_modal.show_modal('{}')"}
    def get_form_classes(self):
        """Build a form class per MultiForm spec, remembering layout kwargs."""
        for f in self.forms:
            processed_form_fields = ProcessFormFields(f.fields, widgets=f.widgets)
            self.form_setup_args.append({
                'form_class': modelform_factory(f.model, form=self.base_form, fields=processed_form_fields.fields,
                                                **processed_form_fields.extra_kwargs()),
                'processed_form_fields': processed_form_fields
            })
    def __init__(self, *args, **kwargs):
        # noinspection PyArgumentList
        super().__init__(*args, **kwargs)
        self.form_setup_args = []  # filled by get_form_classes()
    def get_form_kwargs(self):
        """Return one kwargs dict per sub-form, in the order of self.forms.

        On POST/PUT the request body is JSON keyed by form id; each sub-form
        gets its own 'data'.  Only the last form renders submit buttons.
        """
        all_kwargs = []
        used_ids = []
        if self.request.method in ('POST', 'PUT'):
            form_data = json.loads(self.request.body)
        else:
            form_data = {}
        for f in self.forms:
            f.make_form_id(used_ids)
            kwargs = f.get_kwargs()
            if self.request.method in ('POST', 'PUT'):
                kwargs.update({
                    'data': form_data[f.form_id],
                    # 'files': self.request.FILES,
                })
            if hasattr(self, 'form_setup') and callable(self.form_setup):
                kwargs['form_setup'] = self.form_setup
            all_kwargs.append(kwargs)
        all_kwargs[-1]['no_buttons'] = False
        return all_kwargs
    def get_forms(self):
        """Instantiate all sub-forms, namespacing widget ids per form index."""
        self.get_form_classes()
        form_kwargs = self.get_form_kwargs()
        forms = []
        # form_setup_args and form_kwargs are parallel lists (same order as
        # self.forms), paired here by index.
        for c, s in enumerate(self.form_setup_args):
            kwargs = form_kwargs[c]
            kwargs.update(s['processed_form_fields'].form_init_kwargs())
            form = s['form_class'](**kwargs)
            for field_name, field in form.fields.items():
                # Prefix ids with the form index to keep them unique in the DOM.
                field.widget.attrs.update({'id': f'id_{c}_{field_name}'})
            forms.append(form)
        return forms
    def get_context_data(self, **kwargs):
        self.extra_context = {
            'forms': self.get_forms(),
            'header_title': self.modal_title
        }
        context = super().get_context_data(**kwargs)
        return context
    def refresh_form(self, forms):
        """Re-render every sub-form in place after a validation failure."""
        self.add_command('html', selector=f'#{forms[0].form_id}', parent=True,
                         html=' '.join([str(f) for f in forms]))
        return self.command_response('modal_refresh_trigger', selector=f'#{forms[0].form_id}')
    def forms_valid(self, forms):
        # Hook: called with {form_id: form} once every sub-form validates.
        pass
    def post(self, request, *args, **kwargs):
        """Dispatch ajax commands first; otherwise validate all sub-forms."""
        post_response = super().post(request, *args, **kwargs)
        if post_response:
            return post_response
        forms = self.get_forms()
        for f in forms:
            if not f.is_valid():
                return self.refresh_form(forms)
        return self.forms_valid({f.helper.form_id: f for f in forms})
import base64
import json
import inspect
from django.forms.fields import Field
from django.forms.models import modelform_factory, fields_for_model
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.generic.base import TemplateResponseMixin, TemplateView
from django.views.generic.edit import BaseFormView
from django.views.generic.detail import SingleObjectMixin
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.safestring import mark_safe
from django.utils.decorators import method_decorator
from ajax_helpers.mixins import AjaxHelpers
from . import processes
from .forms import ModelCrispyForm
from .helper import render_modal, modal_button, modal_button_group, ajax_modal_redirect, modal_button_method, \
ajax_modal_replace
class ModalException(Exception):
    """Raised when a modal request cannot be processed (e.g. permission denied)."""
    pass
@method_decorator(ensure_csrf_cookie, name='dispatch')
class BaseModalMixin(AjaxHelpers):
    """Common behaviour for all modal views: URL slug decoding, dispatch-time
    permission gating, shared template context and ajax command helpers.

    ensure_csrf_cookie guarantees the CSRF cookie is set even when the modal
    page itself renders no form tag.
    """
    kwargs: dict
    # Optional CSS hooks for the rendered button group; can also be supplied
    # per-request through self.kwargs under the same names (see button_group()).
    button_group_class = None
    button_container_class = None
    menu_config = {'href_format': "javascript:django_modal.show_modal('{}')"}
    # Command prefixes AjaxHelpers routes to button_*/select2_*/ajax_* methods.
    ajax_commands = ['button', 'select2', 'ajax']
    button_group_css = None
    size = 'lg'  # modal size; overridable via a 'size' context kwarg
    # Template used when the modal is requested directly (no ajax parent page).
    no_parent_template = 'django_modals/blank_page_form.html'
    def __init__(self):
        super().__init__()
        # Honour a modal_mode already set elsewhere in the MRO; default to True.
        if not hasattr(self, 'modal_mode'):
            self.modal_mode = True
        self.slug = {}  # key/value pairs decoded from the URL slug
    def get_context_data(self, **kwargs):
        """Assemble the template context shared by every modal template."""
        # noinspection PyUnresolvedReferences
        context = super().get_context_data(**kwargs) if hasattr(super(), 'get_context_data') else {}
        context.update({'request': self.request, 'slug': self.slug})
        context['modal_url'] = kwargs.get('modal_url', self.request.get_full_path())
        context['no_header_x'] = getattr(self, 'no_header_x', None)
        context['center_header'] = kwargs.get('center_header', getattr(self, 'center_header', None))
        context['size'] = kwargs.get('size', self.size)
        context['modal_type'] = self.kwargs.get('modal_type')
        return context
    def split_slug(self, kwargs):
        """Decode the ``slug`` URL kwarg into ``self.slug``.

        A single-element slug is a primary key; otherwise the slug alternates
        keys and values separated by dashes ('-' alone means empty).  A decoded
        'pk' is mirrored into self.kwargs['pk'].
        """
        if 'slug' in kwargs and kwargs['slug'] != '-':
            s = kwargs['slug'].split('-')
            if len(s) == 1:
                self.slug['pk'] = s[0]
            else:
                self.slug.update({s[k]: s[k+1] for k in range(0, int(len(s)-1), 2)})
            if 'pk' in self.slug:
                self.kwargs['pk'] = self.slug['pk']
    def process_slug_kwargs(self):
        # Hook for subclasses; returning False makes dispatch() raise ModalException.
        return True
    def split_base64(self, kwargs):
        """Merge a base64-encoded JSON URL kwarg into self.slug (non-dict
        payloads are stored under the key 'base64')."""
        if 'base64' in kwargs:
            base64_data = json.loads(base64.urlsafe_b64decode(self.kwargs['base64']))
            if not isinstance(base64_data, dict):
                base64_data = {'base64': base64_data}
            self.slug.update(base64_data)
    def dispatch(self, request, *args, **kwargs):
        """Decode slug/base64 kwargs, then gate on process_slug_kwargs()."""
        self.split_slug(kwargs)
        self.split_base64(kwargs)
        if self.process_slug_kwargs():
            # noinspection PyUnresolvedReferences
            return super().dispatch(request, *args, **self.kwargs)
        else:
            # Subclass vetoed the request (e.g. permission check failed).
            raise ModalException('User does not have permission')
    def button_refresh_modal(self, **_kwargs):
        # Re-render this modal in place, preserving the original slug.
        return self.command_response(ajax_modal_replace(self.request, modal_class=self.__class__,
                                                        slug=self.kwargs.get('slug', '-')))
    def button_group(self):
        """Render self.buttons in a button group, honouring CSS overrides."""
        button_kwargs = {
            'button_group_class': self.kwargs.get('button_group_class', self.button_group_class),
            'button_container_class': self.kwargs.get('button_container_class', self.button_container_class)
        }
        button_kwargs = {k: v for k, v in button_kwargs.items() if v}
        return modal_button_group(self.buttons, **button_kwargs)
    def check_for_background_page(self, context):
        """For a non-ajax request, wrap the rendered modal in a full page.

        NOTE(review): HttpRequest.is_ajax() was removed in Django 4.0 —
        confirm the target Django version or check X-Requested-With instead.
        """
        if not self.request.is_ajax() and self.modal_mode:
            context['modal_type'] = 'no-parent'
            context['no_header_x'] = True
            context['form'] = render_modal(template_name=self.template_name, **context)
            # noinspection PyAttributeOutsideInit
            self.template_name = self.no_parent_template
    def modal_replace(self, modal_name=None, modal_class=None, slug='-', **kwargs):
        # Swap the currently open modal for another one via an ajax command.
        return self.command_response(ajax_modal_replace(self.request, modal_name, slug=slug,
                                                        modal_class=modal_class, **kwargs))
    def message(self, message, title=None, **modal_kwargs):
        """Replace the current modal with a simple message modal."""
        if title is not None:
            modal_kwargs['modal_title'] = title
        return self.modal_replace(modal_class=Modal, message=message, ajax_function='modal_html', **modal_kwargs)
    def confirm(self, message, title=None, button_group_type='confirm', **kwargs):
        """Shortcut: message modal with confirm/cancel style buttons."""
        return self.message(message, title=title, button_group_type=button_group_type, **kwargs)
    def modal_redirect(self, modal_name, slug='-'):
        # Close this modal and open the named one instead.
        return self.command_response(ajax_modal_redirect(modal_name, slug))
class BaseModal(BaseModalMixin, TemplateView):
    """Plain template-backed modal view."""
    template_name = 'django_modals/modal_base.html'
    def get_context_data(self, **kwargs):
        """Add the header title and handle non-ajax (full page) rendering."""
        context = super().get_context_data(**kwargs)
        context['header_title'] = kwargs.get('modal_title', getattr(self, 'modal_title', None))
        self.check_for_background_page(context)
        return context
class Modal(BaseModal):
    """Message modal with a configurable button group."""
    def modal_content(self):
        """Return the HTML body of the modal; defaults to the 'message' kwarg."""
        return self.kwargs.get('message', '')
    def get_modal_buttons(self):
        """Return the button list, honouring an explicit 'buttons' kwarg."""
        if 'buttons' in self.kwargs:
            return self.kwargs['buttons']
        button_group_type = self.kwargs.get('button_group_type')
        if button_group_type == 'confirm':
            return [
                modal_button_method('Confirm', self.kwargs.get('button_function', 'confirm'), 'btn-success'),
                modal_button('Cancel', 'close', 'btn-secondary')
            ]
        elif button_group_type == 'yes_cancel':
            return [
                modal_button_method('Yes', self.kwargs.get('button_function', 'confirm'), 'btn-danger'),
                modal_button('Cancel', 'close', 'btn-success')
            ]
        else:
            return [modal_button('OK', 'close', 'btn-success')]
    @property
    def extra_context(self):
        # Build (once) the rendered content plus button group for the template.
        if not self._extra_content:
            modal_content = self.modal_content()
            if not self.buttons:
                self.buttons = self.get_modal_buttons()
            self._extra_content = {'form': mark_safe(modal_content + self.button_group())}
        return self._extra_content
    def __init__(self):
        if not hasattr(self, 'buttons'):
            self.buttons = []
        self._extra_content = None  # lazy cache used by extra_context
        super().__init__()
class TemplateModal(Modal):
    """Modal whose body is rendered from a dedicated template."""
    modal_template = None
    def modal_context(self):
        """Context for modal_template; defaults to the 'context' kwarg."""
        context = self.kwargs.get('context', {})
        return context
    def modal_content(self):
        return render_to_string(self.modal_template, self.modal_context())
    def __init__(self, modal_template=None, modal_title=None, size=None, **kwargs):
        # These kwargs will be overwritten if called as_view()
        self.kwargs = kwargs
        if size:
            self.size = size
        if modal_title:
            self.modal_title = modal_title
        if modal_template:
            self.modal_template = modal_template
        super().__init__()
    def modal_html(self, request):
        """Render the complete modal HTML for an explicit request."""
        self.request = request
        context = self.get_context_data()
        if 'message' in self.kwargs:
            context['message'] = self.kwargs['message']
        return render_to_string(self.template_name, context)
class FormModalMixin(BaseModalMixin):
    """Form handling for modal views: validation round-trips via ajax commands."""
    template_name = 'django_modals/modal_base.html'
    def form_invalid(self, form):
        """Re-render the invalid form; with ?formonly=1 return raw form HTML."""
        if self.request.GET.get('formonly', False):
            form = self.get_form()
            return HttpResponse(str(form))
        return self.refresh_form(form)
    def post_save(self, created):
        # Hook called after a successful save; created is True for new objects.
        pass
    def form_valid(self, form):
        """Save the form (if it has a save method) and close/reload the page."""
        # Record whether an object existed before saving so post_save can tell
        # a create from an update.
        org_id = self.object.id if hasattr(self, 'object') else None
        save_function = getattr(form, 'save', None)
        if save_function:
            save_function()
        self.post_save(created=org_id is None)
        # Default behaviour is a full page reload unless a command was queued.
        if not self.response_commands:
            self.add_command('reload')
        return self.command_response()
    def refresh_form(self, form):
        """Replace the form in the DOM and fire the modal refresh trigger."""
        self.add_command('html', selector=f'#{form.helper.form_id}', parent=True, html=str(form))
        return self.command_response('modal_refresh_trigger', selector=f'#{form.helper.form_id}')
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['css'] = 'modal'
        if context['form']:
            context['header_title'] = context['form'].get_title()
        else:
            # No form — the caller must have supplied a message instead.
            context['form'] = kwargs['message']
        self.check_for_background_page(context)
        return context
    def __init__(self, *args, **kwargs):
        # 'process' (create/edit/delete mode) may be set by a subclass first.
        if not hasattr(self, 'process'):
            self.process = None
        # noinspection PyArgumentList
        super().__init__(*args, **kwargs)
    def button_make_edit(self, **_kwargs):
        """Switch a read-only modal into edit/delete mode and re-render it.

        Mutates self.request (method/path) so the re-render below behaves as a
        fresh GET of the edit-mode URL — statement order matters here.
        """
        self.slug['modal'] = 'editdelete'
        new_slug = '-'.join([f'{k}-{v}' for k, v in self.slug.items()])
        self.request.method = 'GET'
        self.process = processes.PROCESS_EDIT_DELETE
        self.request.path = reverse(self.request.resolver_match.url_name, kwargs={'slug': new_slug})
        return self.command_response('overwrite_modal',
                                     html=render_to_string(self.template_name, self.get_context_data()))
    def button_refresh_modal(self, **kwargs):
        """Refresh just the form, or the whole modal when read-only/requested."""
        if self.slug.get('readonly') or kwargs.get('whole_modal'):
            return super().button_refresh_modal()
        else:
            form = self.get_form()
            form.clear_errors()
            return self.form_invalid(form)
    def get_form_kwargs(self):
        """Extend the standard form kwargs with modal/crispy-form extras."""
        kwargs = super().get_form_kwargs()
        kwargs['request_user'] = self.request.user
        kwargs['no_buttons'] = self.request.GET.get('no_buttons')
        # Optional per-view hooks forwarded to the form when defined.
        if hasattr(self, 'form_setup') and callable(self.form_setup):
            kwargs['form_setup'] = self.form_setup
        if hasattr(self, 'clean') and callable(self.clean):
            kwargs['clean'] = self.clean
        kwargs.update({k: getattr(self, k, None) for k in ['modal_title', 'slug']})
        if hasattr(self, 'helper_class'):
            kwargs['helper_class'] = self.helper_class
        kwargs['process'] = self.process
        return kwargs
class FormModal(FormModalMixin, TemplateResponseMixin, BaseFormView):
    """Concrete modal form view: FormModalMixin applied to Django's BaseFormView."""
    pass
class ProcessFormFields:
    """Normalise a form field specification list.

    Each entry in *form_fields* is either a plain field name or a
    ``(name, options)`` pair; recognised options are routed into the dicts
    modelform_factory understands, leftovers become per-field layout params.
    """
    def __init__(self, form_fields, widgets=None, field_classes=None, labels=None, help_texts=None,
                 error_messages=None):
        self.fields = []
        self.widgets = widgets if widgets else {}
        self.labels = labels if labels else {}
        self.help_texts = help_texts if help_texts else {}
        self.error_messages = error_messages if error_messages else {}
        self.field_classes = field_classes if field_classes else {}
        self.layout_field_classes = {}
        self.layout_field_params = {}
        for f in form_fields:
            # NOTE(review): type(f) == tuple rejects tuple subclasses such as
            # namedtuples; isinstance(f, tuple) is probably intended — confirm.
            if type(f) == tuple:
                self.fields.append(f[0])
                # Copy so recognised keys can be popped without mutating input.
                param_dict = dict(f[1])
                for k in f[1]:
                    if k == 'widget':
                        self.widgets[f[0]] = param_dict.pop(k)
                    if k == 'label':
                        self.labels[f[0]] = param_dict.pop(k)
                    if k == 'help_text':
                        self.help_texts[f[0]] = param_dict.pop(k)
                    if k == 'error_messages':
                        self.error_messages[f[0]] = param_dict.pop(k)
                    if k == 'layout_field_class':
                        self.layout_field_classes[f[0]] = param_dict.pop(k)
                if param_dict:
                    # Unrecognised keys become layout parameters for this field.
                    self.layout_field_params[f[0]] = param_dict
            else:
                self.fields.append(f)
    def form_init_kwargs(self):
        """Kwargs destined for the form's __init__ (layout information only)."""
        return {f: getattr(self, f) for f in ['layout_field_classes', 'layout_field_params'] if getattr(self, f, None)}
    def extra_kwargs(self):
        """Non-empty kwargs destined for modelform_factory."""
        return {f: getattr(self, f) for f in ['widgets', 'field_classes', 'labels', 'help_texts',
                                              'error_messages'] if getattr(self, f, None)}
class ModelFormModal(SingleObjectMixin, FormModal):
    """Modal form view bound to a model instance (create/view/edit/delete).

    The form class is generated lazily from ``form_fields`` via
    ``modelform_factory``; the active mode ('process') is resolved from the
    URL slug and the per-mode permission attributes below.
    """
    form_fields = []
    template_name = 'django_modals/modal_base.html'
    base_form = ModelCrispyForm
    delete_message = 'Are you sure you want to delete?'
    delete_title = 'Warning'
    field_classes = None
    # Default permissions per mode; see processes.process_data / user_has_perm.
    permission_delete = processes.PERMISSION_DISABLE
    permission_edit = processes.PERMISSION_OFF
    permission_view = processes.PERMISSION_OFF
    permission_create = processes.PERMISSION_OFF
    @staticmethod
    def formfield_callback(f, **kwargs):
        """modelform_factory hook: allow field *instances* or classes as overrides."""
        form_class = kwargs.get('form_class')
        if isinstance(form_class, Field):
            # Already-instantiated field: let it configure itself from the
            # model field, then use it as-is.
            if hasattr(form_class, 'field_setup'):
                # noinspection PyCallingNonCallable
                form_class.field_setup(f)
            return form_class
        elif form_class:
            return form_class(**kwargs)
        return f.formfield(**kwargs)
    def get_form_class(self):
        """Build (once) and cache the model form class from form_fields."""
        if not self.form_class:
            processed_form_fields = ProcessFormFields(self.form_fields, widgets=getattr(self, 'widgets', None),
                                                      field_classes=getattr(self, 'field_classes', None),
                                                      labels=getattr(self, 'labels', None),
                                                      help_texts=getattr(self, 'help_texts', None),
                                                      error_messages=getattr(self, 'error_messages', None))
            self.form_init_args = processed_form_fields.form_init_kwargs()
            self.form_class = modelform_factory(self.model, form=self.base_form, fields=processed_form_fields.fields,
                                                formfield_callback=self.formfield_callback,
                                                **processed_form_fields.extra_kwargs())
        return self.form_class
    def __init__(self, *args, **kwargs):
        self.form_init_args = {}  # filled by get_form_class()
        super().__init__(*args, **kwargs)
        self.object = None  # set in process_slug_kwargs()
    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        if hasattr(self, 'object'):
            kwargs.update({'instance': self.object})
        kwargs.update(self.form_init_args)
        return kwargs
    def object_delete(self):
        # Hook called after a successful delete.
        pass
    def button_confirm_delete(self, **_kwargs):
        """Delete the object (when the mode allows it) and reload the page."""
        if self.process in [processes.PROCESS_DELETE, processes.PROCESS_EDIT_DELETE]:
            self.object.delete()
        self.object_delete()
        if not self.response_commands:
            self.add_command('close', no_refresh=True)
            self.add_command('reload')
        return self.command_response()
    def button_delete(self, **_kwargs):
        """Show a yes/cancel confirmation before actually deleting."""
        return self.confirm(self.delete_message, self.delete_title, button_function='confirm_delete',
                            button_group_type='yes_cancel', size='md')
    @staticmethod
    def user_has_perm(cls_or_instance, user, process):
        """Evaluate the permission attribute for *process* against *user*.

        Works for both a class and an instance; PERMISSION_METHOD delegates to
        a user-defined ``permission`` callable.
        """
        permission_type = getattr(cls_or_instance, processes.process_data[process].class_attribute)
        if permission_type == processes.PERMISSION_METHOD:
            # If permission method is not a staticmethod and function is called by class rather than instance
            # send None instead of self
            if inspect.isclass(cls_or_instance) and len(inspect.signature(cls_or_instance.permission).parameters) == 3:
                permission = cls_or_instance.permission(None, user, process)
            else:
                permission = cls_or_instance.permission(user, process)
        elif permission_type == processes.PERMISSION_OFF:
            permission = True
        elif permission_type == processes.PERMISSION_DISABLE:
            permission = False
        elif permission_type == processes.PERMISSION_AUTHENTICATED:
            permission = user.is_authenticated
        elif permission_type == processes.PERMISSION_STAFF:
            permission = user.is_staff or user.is_superuser
        else:
            # Fall back to Django model permissions for this process.
            # noinspection PyProtectedMember
            perms = [f'{cls_or_instance.model._meta.app_label}.{p}_{cls_or_instance.model._meta.model_name}'
                     for p in processes.process_data[process].django_permission]
            permission = user.has_perms(perms)
        return permission
    def get_process(self, user, process):
        """Walk the process fallback chain until one is permitted (or none left)."""
        while True:
            permission = self.user_has_perm(self, user, process)
            if permission:
                break
            process = processes.process_data[process].fallback
            if not process:
                break
        return permission, process
    def get_model(self):
        # Hook: supply the model when neither self.model nor form_class define it.
        pass
    def get_queryset(self):
        """Allow the model to restrict visible rows via a query_filter hook."""
        query = super().get_queryset()
        if hasattr(self.model, 'query_filter'):
            return self.model.query_filter(query, self.request, modal=self)
        return query
    def process_slug_kwargs(self):
        """Resolve mode, model and object from the slug; return permission."""
        # Mode: no pk -> create; explicit 'modal' key -> mapped mode;
        # otherwise default to edit/delete.
        if 'pk' not in self.slug:
            self.process = processes.PROCESS_CREATE
        elif 'modal' in self.slug:
            self.process = processes.modal_url_type[self.slug['modal']]
        else:
            if self.process is None:
                self.process = processes.PROCESS_EDIT_DELETE
        if self.model is None:
            if self.form_class:
                self.model = self.form_class.get_model(self.slug)
            else:
                self.model = self.get_model()
        if 'pk' in self.kwargs:
            self.object = self.get_object()
        else:
            # Creating: start from a blank instance and apply slug values.
            self.object = self.model()
            # noinspection PyProtectedMember
            fields = self.model._meta.get_fields()
            field_dict = {}
            for f in fields:
                field_dict[f.name.lower()] = f
            for i in self.slug:
                # M2M values cannot be set on an unsaved instance — seed the
                # form's initial data instead.
                if i in field_dict and field_dict[i].many_to_many:
                    self.initial[i] = [self.slug[i]]
                else:
                    setattr(self.object, i, self.slug[i])
        has_perm, self.process = self.get_process(self.request.user, self.process)
        return has_perm
    def select2_ajax_search(self, page_len=10, filter_field=None, filter_search='istartswith', search=None, page=None,
                            extra_filter=None, **_kwargs):
        """Paginated JSON search endpoint backing select2 widgets.

        The target field name is recovered from the calling method's name
        (select2_<field>) via the call stack.
        """
        field_name = inspect.stack()[1][3][len('select2_'):]
        field = fields_for_model(self.model, field_classes=self.field_classes, fields=[field_name],
                                 formfield_callback=self.formfield_callback)[field_name]
        if filter_field and search:
            query_filter = {f'{filter_field}__{filter_search}': search}
        else:
            query_filter = {}
        if extra_filter:
            query_filter.update(extra_filter)
        if hasattr(field, 'model'):
            # noinspection PyUnresolvedReferences
            choices = field.model.objects.filter(**query_filter)
        else:
            choices = field.choices.queryset.filter(**query_filter)
        if page:
            # Fetch one extra row so 'more' can be derived below.
            choices = choices[page_len * (page - 1): page_len * page + 1]
        if hasattr(field, 'select_str'):
            # noinspection PyCallingNonCallable
            results = [{'id': str(c.id), 'text': field.select_str(c)} for c in choices[:page_len]]
        else:
            results = [{'id': str(c.id), 'text': str(c)} for c in choices[:page_len]]
        return JsonResponse({'results': results, 'pagination': {'more': len(choices) > len(results)}})
class MultiForm:
    """Specification of one sub-form inside a MultiFormModal."""
    def __init__(self, model, fields, form_id=None, initial=None, widgets=None, **kwargs):
        self.model = model
        self.fields = fields
        self.kwargs = kwargs  # extra kwargs forwarded to the form instance
        self.form_id = form_id
        self.initial = initial if initial else {}
        self.widgets = widgets if widgets else {}
    def make_form_id(self, used_ids):
        """Assign a unique id (derived from the model name) and record it."""
        if not self.form_id:
            self.form_id = self.model.__name__ + 'Form'
            if self.form_id in used_ids:
                # Append the first free numeric suffix.
                self.form_id += '_{}'
                count = 1
                while self.form_id.format(count) in used_ids:
                    count += 1
                self.form_id = self.form_id.format(count)
        used_ids.append(self.form_id)
    def get_kwargs(self):
        """Keyword arguments used to instantiate this sub-form."""
        kwargs = {'form_id': self.form_id, 'initial': self.initial, 'no_buttons': True}
        kwargs.update(self.kwargs)
        return kwargs
class MultiFormModal(BaseModal):
template_name = 'django_modals/multi_form.html'
modal_title = ''
base_form = ModelCrispyForm
forms = []
menu_config = {'href_format': "javascript:django_modal.show_modal('{}')"}
def get_form_classes(self):
for f in self.forms:
processed_form_fields = ProcessFormFields(f.fields, widgets=f.widgets)
self.form_setup_args.append({
'form_class': modelform_factory(f.model, form=self.base_form, fields=processed_form_fields.fields,
**processed_form_fields.extra_kwargs()),
'processed_form_fields': processed_form_fields
})
def __init__(self, *args, **kwargs):
# noinspection PyArgumentList
super().__init__(*args, **kwargs)
self.form_setup_args = []
def get_form_kwargs(self):
all_kwargs = []
used_ids = []
if self.request.method in ('POST', 'PUT'):
form_data = json.loads(self.request.body)
else:
form_data = {}
for f in self.forms:
f.make_form_id(used_ids)
kwargs = f.get_kwargs()
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': form_data[f.form_id],
# 'files': self.request.FILES,
})
if hasattr(self, 'form_setup') and callable(self.form_setup):
kwargs['form_setup'] = self.form_setup
all_kwargs.append(kwargs)
all_kwargs[-1]['no_buttons'] = False
return all_kwargs
def get_forms(self):
self.get_form_classes()
form_kwargs = self.get_form_kwargs()
forms = []
for c, s in enumerate(self.form_setup_args):
kwargs = form_kwargs[c]
kwargs.update(s['processed_form_fields'].form_init_kwargs())
form = s['form_class'](**kwargs)
for field_name, field in form.fields.items():
field.widget.attrs.update({'id': f'id_{c}_{field_name}'})
forms.append(form)
return forms
def get_context_data(self, **kwargs):
self.extra_context = {
'forms': self.get_forms(),
'header_title': self.modal_title
}
context = super().get_context_data(**kwargs)
return context
def refresh_form(self, forms):
self.add_command('html', selector=f'#{forms[0].form_id}', parent=True,
html=' '.join([str(f) for f in forms]))
return self.command_response('modal_refresh_trigger', selector=f'#{forms[0].form_id}')
def forms_valid(self, forms):
pass
def post(self, request, *args, **kwargs):
post_response = super().post(request, *args, **kwargs)
if post_response:
return post_response
forms = self.get_forms()
for f in forms:
if not f.is_valid():
return self.refresh_form(forms)
return self.forms_valid({f.helper.form_id: f for f in forms})
| en | 0.497519 | # noinspection PyUnresolvedReferences # noinspection PyUnresolvedReferences # noinspection PyAttributeOutsideInit # These kwargs will be overwritten if called as_view() # noinspection PyArgumentList # noinspection PyCallingNonCallable # If permission method is not a staticmethod and function is called by class rather than instance # send None instead of self # noinspection PyProtectedMember # noinspection PyProtectedMember # noinspection PyUnresolvedReferences # noinspection PyCallingNonCallable # noinspection PyArgumentList # 'files': self.request.FILES, | 1.786116 | 2 |
TP1/finalTPScript.py | Amathlog/MTH6412B | 0 | 6612256 | # -*- coding: utf-8 -*-
import read_stsp
from graph import Graph
from node import Node
from edge import Edge
from algoMST import kruskal, prim
from TSPSolve import solveTSP
import sys
# Récupération de la liste de tous les fichiers stsp
TSPpath = "./instances/stsp/"
from os import listdir
from os.path import isfile, join
files = [f for f in listdir(TSPpath) if isfile(join(TSPpath, f))]
# Récupération des valeurs des chemins optimaux
best_ones = {}
with open("./res/bestones.txt") as fd:
for line in fd:
aux = line.split()
best_ones[aux[0]] = int(aux[2])
# Boucle sur tous les fichiers
for file in files:
with open(TSPpath + file, "r") as fd:
header = read_stsp.read_header(fd)
print 'Header: ', header
dim = header['DIMENSION']
edge_weight_format = header['EDGE_WEIGHT_FORMAT']
print "Reading nodes"
nodes = read_stsp.read_nodes(header, fd)
print "Reading edges"
edges = read_stsp.read_edges(header, fd)
edge_list = []
for k in range(dim):
edge_list.append([])
for e in edges:
if edge_weight_format in ['UPPER_ROW', 'LOWER_COL', \
'UPPER_DIAG_ROW', 'LOWER_DIAG_COL']:
edge_list[e[0]].append((e[1], int(e[2])))
else:
edge_list[e[1]].append((e[0], int(e[2])))
for k in range(dim):
edge_list[k].sort()
g = Graph(header['NAME'])
if not nodes:
for i in range(dim):
g.add_node(Node(str(i)))
else:
for i in range(dim):
g.add_node(Node(str(i), nodes[i]))
for i in range(dim):
for pair in edge_list[i]:
if i == pair[0] and pair[1] == 0:
continue
g.add_edge(Edge(g.get_nodes()[i], g.get_nodes()[pair[0]], pair[1]))
min = 2*[sys.maxsize]
travel = [None, None]
for i in range(2):
for node in g.get_nodes():
aux = solveTSP(g, node, i==0)
if(aux.get_weight() < min):
travel[i] = aux
min[i] = aux.get_weight()
filename = file.split('.')[0]
optimal = best_ones[filename]
g.plot_graph(mst=travel[0], title='Prim: ' + str(travel[0].get_weight()) + " ; Optimal: " + str(optimal) + " (Error: " + str(travel[0].get_weight() - optimal) + ")", show=False, filename="./res/"+filename+"_prim")
g.plot_graph(mst=travel[1], title='Kruskal: ' + str(travel[1].get_weight()) + " ; Optimal: " + str(optimal) + " (Error: " + str(travel[1].get_weight() - optimal) + ")", show=False, filename="./res/"+filename+"_kruskal") | # -*- coding: utf-8 -*-
import read_stsp
from graph import Graph
from node import Node
from edge import Edge
from algoMST import kruskal, prim
from TSPSolve import solveTSP
import sys
# Récupération de la liste de tous les fichiers stsp
TSPpath = "./instances/stsp/"
from os import listdir
from os.path import isfile, join
files = [f for f in listdir(TSPpath) if isfile(join(TSPpath, f))]
# Récupération des valeurs des chemins optimaux
best_ones = {}
with open("./res/bestones.txt") as fd:
for line in fd:
aux = line.split()
best_ones[aux[0]] = int(aux[2])
# Boucle sur tous les fichiers
for file in files:
with open(TSPpath + file, "r") as fd:
header = read_stsp.read_header(fd)
print 'Header: ', header
dim = header['DIMENSION']
edge_weight_format = header['EDGE_WEIGHT_FORMAT']
print "Reading nodes"
nodes = read_stsp.read_nodes(header, fd)
print "Reading edges"
edges = read_stsp.read_edges(header, fd)
edge_list = []
for k in range(dim):
edge_list.append([])
for e in edges:
if edge_weight_format in ['UPPER_ROW', 'LOWER_COL', \
'UPPER_DIAG_ROW', 'LOWER_DIAG_COL']:
edge_list[e[0]].append((e[1], int(e[2])))
else:
edge_list[e[1]].append((e[0], int(e[2])))
for k in range(dim):
edge_list[k].sort()
g = Graph(header['NAME'])
if not nodes:
for i in range(dim):
g.add_node(Node(str(i)))
else:
for i in range(dim):
g.add_node(Node(str(i), nodes[i]))
for i in range(dim):
for pair in edge_list[i]:
if i == pair[0] and pair[1] == 0:
continue
g.add_edge(Edge(g.get_nodes()[i], g.get_nodes()[pair[0]], pair[1]))
min = 2*[sys.maxsize]
travel = [None, None]
for i in range(2):
for node in g.get_nodes():
aux = solveTSP(g, node, i==0)
if(aux.get_weight() < min):
travel[i] = aux
min[i] = aux.get_weight()
filename = file.split('.')[0]
optimal = best_ones[filename]
g.plot_graph(mst=travel[0], title='Prim: ' + str(travel[0].get_weight()) + " ; Optimal: " + str(optimal) + " (Error: " + str(travel[0].get_weight() - optimal) + ")", show=False, filename="./res/"+filename+"_prim")
g.plot_graph(mst=travel[1], title='Kruskal: ' + str(travel[1].get_weight()) + " ; Optimal: " + str(optimal) + " (Error: " + str(travel[1].get_weight() - optimal) + ")", show=False, filename="./res/"+filename+"_kruskal") | fr | 0.964213 | # -*- coding: utf-8 -*- # Récupération de la liste de tous les fichiers stsp # Récupération des valeurs des chemins optimaux # Boucle sur tous les fichiers | 2.809608 | 3 |
dirbot/settings.py | kyle-yu/scrape_on_lianjia | 0 | 6612257 | <gh_stars>0
# Scrapy settings for dirbot project
SPIDER_MODULES = ['dirbot.spiders']
NEWSPIDER_MODULE = 'dirbot.spiders'
DEFAULT_ITEM_CLASS = 'dirbot.items.SecondHouse'
#ITEM_PIPELINES = {'dirbot.pipelines.FilterWordsPipeline': 1}
ITEM_PIPELINES = ['dirbot.mysqlpipelines.MySQLStorePipeline']
FEED_EXPORTERS = {
'csv': 'dirbot.my_project_csv_item_exporter.MyProjectCsvItemExporter',
}
##LOG_LEVEL = 'INFO'
##LOG_ENCODING = 'utf-8'
##LOG_FILE = 'ljsechouse.log'
FIELDS_TO_EXPORT = [
'dataid',
'city',
'district',
'title',
'zonename',
'housetype',
'square',
'direction',
'remark',
'subway',
'taxfree',
'education',
'totalprice',
'perprice',
'review'
]
CSV_DELIMITER = '^'
| # Scrapy settings for dirbot project
SPIDER_MODULES = ['dirbot.spiders']
NEWSPIDER_MODULE = 'dirbot.spiders'
DEFAULT_ITEM_CLASS = 'dirbot.items.SecondHouse'
#ITEM_PIPELINES = {'dirbot.pipelines.FilterWordsPipeline': 1}
ITEM_PIPELINES = ['dirbot.mysqlpipelines.MySQLStorePipeline']
FEED_EXPORTERS = {
'csv': 'dirbot.my_project_csv_item_exporter.MyProjectCsvItemExporter',
}
##LOG_LEVEL = 'INFO'
##LOG_ENCODING = 'utf-8'
##LOG_FILE = 'ljsechouse.log'
FIELDS_TO_EXPORT = [
'dataid',
'city',
'district',
'title',
'zonename',
'housetype',
'square',
'direction',
'remark',
'subway',
'taxfree',
'education',
'totalprice',
'perprice',
'review'
]
CSV_DELIMITER = '^' | en | 0.378629 | # Scrapy settings for dirbot project #ITEM_PIPELINES = {'dirbot.pipelines.FilterWordsPipeline': 1} ##LOG_LEVEL = 'INFO' ##LOG_ENCODING = 'utf-8' ##LOG_FILE = 'ljsechouse.log' | 1.554238 | 2 |
application_form/migrations/0033_rename_apartmentqueueapplication_apartmentreservation.py | City-of-Helsinki/apartment-application-service | 1 | 6612258 | <filename>application_form/migrations/0033_rename_apartmentqueueapplication_apartmentreservation.py
# Generated by Django 3.2.6 on 2021-12-22 10:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("apartment", "0010_bigauto_pk_field"),
("application_form", "0032_drop_apartment_queue"),
]
operations = [
migrations.RenameModel(
old_name="ApartmentQueueApplication",
new_name="ApartmentReservation",
),
]
| <filename>application_form/migrations/0033_rename_apartmentqueueapplication_apartmentreservation.py
# Generated by Django 3.2.6 on 2021-12-22 10:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("apartment", "0010_bigauto_pk_field"),
("application_form", "0032_drop_apartment_queue"),
]
operations = [
migrations.RenameModel(
old_name="ApartmentQueueApplication",
new_name="ApartmentReservation",
),
]
| en | 0.911975 | # Generated by Django 3.2.6 on 2021-12-22 10:27 | 1.615601 | 2 |
code/Solution_0026_removeDuplicates.py | qizhenkang/myLeetCode | 0 | 6612259 | <reponame>qizhenkang/myLeetCode<filename>code/Solution_0026_removeDuplicates.py
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 14 14:19:50 2021
@author: qizhe
"""
# 这里和答案不太一样,对题目的理解有点问题
# 题目要求填入前面,后面不管,
# 答案用的是快慢指针
class Solution:
def removeDuplicates(self, nums):
Datalength = len(nums)
if Datalength <= 1:
return Datalength
# 已经升序排列,只需要两个指针即可
pointer1 = 0
pointer2 = 1
deletelength = 0
while(pointer2 < Datalength - deletelength):
if(nums[pointer1] == nums[pointer2]):
nums.pop(pointer2)
deletelength = deletelength +1
else:
pointer1 = pointer1 + 1
pointer2 = pointer2 + 1
return len(nums)
if __name__ == '__main__':
solu = Solution()
input_Str = str('{[]{}()}')
# input_list =
input_List = [0, 0,2]
input_int = 4
result = solu.removeDuplicates(input_List)
# output_Str = 'result = ' + solu.intToRoman(input_int)
output_Str = 'result = ' + str(result)
print(output_Str)
| # -*- coding: utf-8 -*-
"""
Created on Sat Aug 14 14:19:50 2021
@author: qizhe
"""
# 这里和答案不太一样,对题目的理解有点问题
# 题目要求填入前面,后面不管,
# 答案用的是快慢指针
class Solution:
def removeDuplicates(self, nums):
Datalength = len(nums)
if Datalength <= 1:
return Datalength
# 已经升序排列,只需要两个指针即可
pointer1 = 0
pointer2 = 1
deletelength = 0
while(pointer2 < Datalength - deletelength):
if(nums[pointer1] == nums[pointer2]):
nums.pop(pointer2)
deletelength = deletelength +1
else:
pointer1 = pointer1 + 1
pointer2 = pointer2 + 1
return len(nums)
if __name__ == '__main__':
solu = Solution()
input_Str = str('{[]{}()}')
# input_list =
input_List = [0, 0,2]
input_int = 4
result = solu.removeDuplicates(input_List)
# output_Str = 'result = ' + solu.intToRoman(input_int)
output_Str = 'result = ' + str(result)
print(output_Str) | zh | 0.382565 | # -*- coding: utf-8 -*- Created on Sat Aug 14 14:19:50 2021 @author: qizhe # 这里和答案不太一样,对题目的理解有点问题 # 题目要求填入前面,后面不管, # 答案用的是快慢指针 # 已经升序排列,只需要两个指针即可 # input_list = # output_Str = 'result = ' + solu.intToRoman(input_int) | 3.411044 | 3 |
systemAdmin/apps.py | SongYuQiu/Social-Network-Portrait-Analysis-System-BackCode | 0 | 6612260 | <gh_stars>0
from django.apps import AppConfig
class SystemadminConfig(AppConfig):
name = 'systemAdmin'
| from django.apps import AppConfig
class SystemadminConfig(AppConfig):
name = 'systemAdmin' | none | 1 | 1.162181 | 1 | |
mdfstudio/gui/widgets/tree_numeric.py | hyundai-autoever-opensource/mdfstudio-hkmc | 0 | 6612261 | # -*- coding: utf-8 -*-
import json
from struct import pack
from PyQt5 import QtCore, QtGui, QtWidgets
from ..utils import extract_mime_names
class NumericTreeWidget(QtWidgets.QTreeWidget):
add_channels_request = QtCore.pyqtSignal(list)
items_rearranged = QtCore.pyqtSignal()
items_deleted = QtCore.pyqtSignal(list)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.setAcceptDrops(True)
self.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
self.setEditTriggers(QtWidgets.QAbstractItemView.DoubleClicked)
def keyPressEvent(self, event):
key = event.key()
if key == QtCore.Qt.Key_Space:
selected_items = self.selectedItems()
if not selected_items:
return
elif len(selected_items) == 1:
item = selected_items[0]
checked = item.checkState(0)
if checked == QtCore.Qt.Checked:
item.setCheckState(0, QtCore.Qt.Unchecked)
else:
item.setCheckState(0, QtCore.Qt.Checked)
else:
if any(
item.checkState(0) == QtCore.Qt.Unchecked for item in selected_items
):
checked = QtCore.Qt.Checked
else:
checked = QtCore.Qt.Unchecked
for item in selected_items:
item.setCheckState(0, checked)
elif (
event.key() == QtCore.Qt.Key_Delete
and event.modifiers() == QtCore.Qt.NoModifier
):
selected = reversed(self.selectedItems())
names = [item.text(0) for item in selected]
for item in selected:
if item.parent() is None:
index = self.indexFromItem(item).row()
self.takeTopLevelItem(index)
else:
item.parent().removeChild(item)
self.items_deleted.emit(names)
else:
super().keyPressEvent(event)
def startDrag(self, supportedActions):
selected_items = self.selectedItems()
mimeData = QtCore.QMimeData()
data = []
for item in selected_items:
entry = item.entry
if entry == (-1, -1):
info = {
"name": item.name,
"computation": {},
}
info = json.dumps(info).encode("utf-8")
else:
info = item.name.encode("utf-8")
data.append(
pack(
f"<36s3q{len(info)}s",
str(item.mdf_uuid).encode("ascii"),
entry[0],
entry[1],
len(info),
info,
)
)
mimeData.setData(
"application/octet-stream-mdfstudio", QtCore.QByteArray(b"".join(data))
)
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
drag.exec(QtCore.Qt.CopyAction)
def dragEnterEvent(self, e):
e.accept()
def dropEvent(self, e):
if e.source() is self:
super().dropEvent(e)
self.items_rearranged.emit()
else:
data = e.mimeData()
if data.hasFormat("application/octet-stream-mdfstudio"):
names = extract_mime_names(data)
self.add_channels_request.emit(names)
else:
super().dropEvent(e)
| # -*- coding: utf-8 -*-
import json
from struct import pack
from PyQt5 import QtCore, QtGui, QtWidgets
from ..utils import extract_mime_names
class NumericTreeWidget(QtWidgets.QTreeWidget):
add_channels_request = QtCore.pyqtSignal(list)
items_rearranged = QtCore.pyqtSignal()
items_deleted = QtCore.pyqtSignal(list)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.setAcceptDrops(True)
self.setDragDropMode(QtWidgets.QAbstractItemView.InternalMove)
self.setEditTriggers(QtWidgets.QAbstractItemView.DoubleClicked)
def keyPressEvent(self, event):
key = event.key()
if key == QtCore.Qt.Key_Space:
selected_items = self.selectedItems()
if not selected_items:
return
elif len(selected_items) == 1:
item = selected_items[0]
checked = item.checkState(0)
if checked == QtCore.Qt.Checked:
item.setCheckState(0, QtCore.Qt.Unchecked)
else:
item.setCheckState(0, QtCore.Qt.Checked)
else:
if any(
item.checkState(0) == QtCore.Qt.Unchecked for item in selected_items
):
checked = QtCore.Qt.Checked
else:
checked = QtCore.Qt.Unchecked
for item in selected_items:
item.setCheckState(0, checked)
elif (
event.key() == QtCore.Qt.Key_Delete
and event.modifiers() == QtCore.Qt.NoModifier
):
selected = reversed(self.selectedItems())
names = [item.text(0) for item in selected]
for item in selected:
if item.parent() is None:
index = self.indexFromItem(item).row()
self.takeTopLevelItem(index)
else:
item.parent().removeChild(item)
self.items_deleted.emit(names)
else:
super().keyPressEvent(event)
def startDrag(self, supportedActions):
selected_items = self.selectedItems()
mimeData = QtCore.QMimeData()
data = []
for item in selected_items:
entry = item.entry
if entry == (-1, -1):
info = {
"name": item.name,
"computation": {},
}
info = json.dumps(info).encode("utf-8")
else:
info = item.name.encode("utf-8")
data.append(
pack(
f"<36s3q{len(info)}s",
str(item.mdf_uuid).encode("ascii"),
entry[0],
entry[1],
len(info),
info,
)
)
mimeData.setData(
"application/octet-stream-mdfstudio", QtCore.QByteArray(b"".join(data))
)
drag = QtGui.QDrag(self)
drag.setMimeData(mimeData)
drag.exec(QtCore.Qt.CopyAction)
def dragEnterEvent(self, e):
e.accept()
def dropEvent(self, e):
if e.source() is self:
super().dropEvent(e)
self.items_rearranged.emit()
else:
data = e.mimeData()
if data.hasFormat("application/octet-stream-mdfstudio"):
names = extract_mime_names(data)
self.add_channels_request.emit(names)
else:
super().dropEvent(e)
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.204024 | 2 |
day4-2.py | vicyyn/AdventOfCode | 0 | 6612262 | import re
import string
fields = ['byr','iyr','eyr','hgt','hcl','ecl','pid']
ecl_fields = 'amb blu brn gry grn hzl oth'.split(' ')
def process(passport):
if(all(substring in passport for substring in fields)):
passportList = re.split(' ',passport)
passportList = [i for i in passportList if(i != '')]
for element in passportList:
if(fields[0] in element):
element = element.split(':')[1]
if(int(element) < 1920 or int(element) > 2002):
return(False)
if(fields[1] in element):
element = element.split(':')[1]
if(int(element) < 2010 or int(element) > 2020):
return(False)
if(fields[2] in element):
element = element.split(':')[1]
if(int(element) < 2020 or int(element) > 2030):
return(False)
if(fields[3] in element):
element = element.split(':')[1]
if('cm' in element):
element = element.replace('cm','')
if(int(element) < 150 or int(element) > 193):
return(False)
if('in' in element):
element = element.replace('in','')
if(int(element) < 59 or int(element) > 76):
return(False)
if(fields[4] in element):
element = element.split(':')[1]
if(element[0] != '#'):
return(False)
element = element.replace('#','')
if(any(c not in string.hexdigits for c in element) and len(element) != 6):
return(False)
if(fields[5] in element):
if(all(substring not in element for substring in ecl_fields)):
return(False)
if(fields[6] in element):
element = element.split(':')[1]
if(not re.match('^[0-9]{9}$',element)):
return False
return True
else:
return False
with open("input.txt") as f:
dat = f.readlines()
dat = [line.strip() for line in dat]
total = 0
currentPassport = ''
for i in dat:
if i != '':
currentPassport += ' ' + i
else:
if(process(currentPassport)):
total += 1
currentPassport = ''
if(process(currentPassport)):
total += 1
print(total)
| import re
import string
fields = ['byr','iyr','eyr','hgt','hcl','ecl','pid']
ecl_fields = 'amb blu brn gry grn hzl oth'.split(' ')
def process(passport):
if(all(substring in passport for substring in fields)):
passportList = re.split(' ',passport)
passportList = [i for i in passportList if(i != '')]
for element in passportList:
if(fields[0] in element):
element = element.split(':')[1]
if(int(element) < 1920 or int(element) > 2002):
return(False)
if(fields[1] in element):
element = element.split(':')[1]
if(int(element) < 2010 or int(element) > 2020):
return(False)
if(fields[2] in element):
element = element.split(':')[1]
if(int(element) < 2020 or int(element) > 2030):
return(False)
if(fields[3] in element):
element = element.split(':')[1]
if('cm' in element):
element = element.replace('cm','')
if(int(element) < 150 or int(element) > 193):
return(False)
if('in' in element):
element = element.replace('in','')
if(int(element) < 59 or int(element) > 76):
return(False)
if(fields[4] in element):
element = element.split(':')[1]
if(element[0] != '#'):
return(False)
element = element.replace('#','')
if(any(c not in string.hexdigits for c in element) and len(element) != 6):
return(False)
if(fields[5] in element):
if(all(substring not in element for substring in ecl_fields)):
return(False)
if(fields[6] in element):
element = element.split(':')[1]
if(not re.match('^[0-9]{9}$',element)):
return False
return True
else:
return False
with open("input.txt") as f:
dat = f.readlines()
dat = [line.strip() for line in dat]
total = 0
currentPassport = ''
for i in dat:
if i != '':
currentPassport += ' ' + i
else:
if(process(currentPassport)):
total += 1
currentPassport = ''
if(process(currentPassport)):
total += 1
print(total)
| none | 1 | 3.161543 | 3 | |
tests/elf/add_segment.py | aguinet/LIEF | 0 | 6612263 | #!/usr/bin/env python
import unittest
import logging
import os
import sys
import stat
import re
import subprocess
import tempfile
import shutil
from subprocess import Popen
import lief
from lief.ELF import Segment
from unittest import TestCase
from utils import get_sample
CURRENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
STUB = lief.parse(os.path.join(CURRENT_DIRECTORY, "hello_lief.bin"))
class TestAddSegment(TestCase):
def setUp(self):
self.logger = logging.getLogger(__name__)
self.tmp_dir = tempfile.mkdtemp(suffix='_lief_testhash')
self.logger.debug("temp dir: {}".format(self.tmp_dir))
@unittest.skipUnless(sys.platform.startswith("linux"), "requires Linux")
def test_simple(self):
sample_path = get_sample('ELF/ELF64_x86-64_binary_ls.bin')
output = os.path.join(self.tmp_dir, "ls.segment")
ls = lief.parse(sample_path)
segment = Segment()
segment.type = lief.ELF.SEGMENT_TYPES.LOAD
segment.flag = lief.ELF.SEGMENT_FLAGS.PF_R | lief.ELF.SEGMENT_FLAGS.PF_W | lief.ELF.SEGMENT_FLAGS.PF_X
segment.content = STUB.segments[0].content # First LOAD segment which holds payload
segment.alignment = 8
segment = ls.add_segment(segment, base=0xA00000, force_note=True)
ls.header.entrypoint = segment.virtual_address + STUB.header.entrypoint
ls.write(output)
st = os.stat(output)
os.chmod(output, st.st_mode | stat.S_IEXEC)
p = Popen(output, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, _ = p.communicate()
self.logger.debug(stdout.decode("utf8"))
self.assertIsNotNone(re.search(r'LIEF is Working', stdout.decode("utf8")))
@unittest.skipUnless(sys.platform.startswith("linux"), "requires Linux")
def test_gcc(self):
sample_path = get_sample('ELF/ELF64_x86-64_binary_gcc.bin')
output = os.path.join(self.tmp_dir, "gcc.segment")
gcc = lief.parse(sample_path)
segment = Segment()
segment.type = lief.ELF.SEGMENT_TYPES.LOAD
segment.flag = lief.ELF.SEGMENT_FLAGS.PF_R | lief.ELF.SEGMENT_FLAGS.PF_W | lief.ELF.SEGMENT_FLAGS.PF_X
segment.content = STUB.segments[0].content # First LOAD segment which holds payload
segment.alignment = 8
segment = gcc.add_segment(segment, base=0xA00000, force_note=True)
gcc.header.entrypoint = segment.virtual_address + STUB.header.entrypoint
gcc.write(output)
st = os.stat(output)
os.chmod(output, st.st_mode | stat.S_IEXEC)
p = Popen(output, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, _ = p.communicate()
self.logger.debug(stdout.decode("utf8"))
self.assertIsNotNone(re.search(r'LIEF is Working', stdout.decode("utf8")))
def tearDown(self):
# Delete it
if os.path.isdir(self.tmp_dir):
shutil.rmtree(self.tmp_dir)
if __name__ == '__main__':
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
root_logger.addHandler(ch)
unittest.main(verbosity=2)
| #!/usr/bin/env python
import unittest
import logging
import os
import sys
import stat
import re
import subprocess
import tempfile
import shutil
from subprocess import Popen
import lief
from lief.ELF import Segment
from unittest import TestCase
from utils import get_sample
CURRENT_DIRECTORY = os.path.dirname(os.path.abspath(__file__))
STUB = lief.parse(os.path.join(CURRENT_DIRECTORY, "hello_lief.bin"))
class TestAddSegment(TestCase):
def setUp(self):
self.logger = logging.getLogger(__name__)
self.tmp_dir = tempfile.mkdtemp(suffix='_lief_testhash')
self.logger.debug("temp dir: {}".format(self.tmp_dir))
@unittest.skipUnless(sys.platform.startswith("linux"), "requires Linux")
def test_simple(self):
sample_path = get_sample('ELF/ELF64_x86-64_binary_ls.bin')
output = os.path.join(self.tmp_dir, "ls.segment")
ls = lief.parse(sample_path)
segment = Segment()
segment.type = lief.ELF.SEGMENT_TYPES.LOAD
segment.flag = lief.ELF.SEGMENT_FLAGS.PF_R | lief.ELF.SEGMENT_FLAGS.PF_W | lief.ELF.SEGMENT_FLAGS.PF_X
segment.content = STUB.segments[0].content # First LOAD segment which holds payload
segment.alignment = 8
segment = ls.add_segment(segment, base=0xA00000, force_note=True)
ls.header.entrypoint = segment.virtual_address + STUB.header.entrypoint
ls.write(output)
st = os.stat(output)
os.chmod(output, st.st_mode | stat.S_IEXEC)
p = Popen(output, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, _ = p.communicate()
self.logger.debug(stdout.decode("utf8"))
self.assertIsNotNone(re.search(r'LIEF is Working', stdout.decode("utf8")))
@unittest.skipUnless(sys.platform.startswith("linux"), "requires Linux")
def test_gcc(self):
sample_path = get_sample('ELF/ELF64_x86-64_binary_gcc.bin')
output = os.path.join(self.tmp_dir, "gcc.segment")
gcc = lief.parse(sample_path)
segment = Segment()
segment.type = lief.ELF.SEGMENT_TYPES.LOAD
segment.flag = lief.ELF.SEGMENT_FLAGS.PF_R | lief.ELF.SEGMENT_FLAGS.PF_W | lief.ELF.SEGMENT_FLAGS.PF_X
segment.content = STUB.segments[0].content # First LOAD segment which holds payload
segment.alignment = 8
segment = gcc.add_segment(segment, base=0xA00000, force_note=True)
gcc.header.entrypoint = segment.virtual_address + STUB.header.entrypoint
gcc.write(output)
st = os.stat(output)
os.chmod(output, st.st_mode | stat.S_IEXEC)
p = Popen(output, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, _ = p.communicate()
self.logger.debug(stdout.decode("utf8"))
self.assertIsNotNone(re.search(r'LIEF is Working', stdout.decode("utf8")))
def tearDown(self):
# Delete it
if os.path.isdir(self.tmp_dir):
shutil.rmtree(self.tmp_dir)
if __name__ == '__main__':
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
root_logger.addHandler(ch)
unittest.main(verbosity=2)
| en | 0.921246 | #!/usr/bin/env python # First LOAD segment which holds payload # First LOAD segment which holds payload # Delete it | 2.252384 | 2 |
awslambdahellojson/lambda_function.py | krlex/aws-python-examples | 21 | 6612264 | <reponame>krlex/aws-python-examples
# lamdda_function.py
# It handles a simple AWS Lambda function that shows the content (JSON) of the call
# to the lambda function and returns a message including this content.
def lambda_handler(event, context):
message = 'Hello {} {}!'.format(event['first_name'],
event['last_name'])
return {
'message' : message
}
| # lamdda_function.py
# It handles a simple AWS Lambda function that shows the content (JSON) of the call
# to the lambda function and returns a message including this content.
def lambda_handler(event, context):
message = 'Hello {} {}!'.format(event['first_name'],
event['last_name'])
return {
'message' : message
} | en | 0.599767 | # lamdda_function.py # It handles a simple AWS Lambda function that shows the content (JSON) of the call # to the lambda function and returns a message including this content. | 2.730443 | 3 |
src/spyd/game/client/message_handlers/editent_handler.py | DanSeraf/spyd | 4 | 6612265 | <reponame>DanSeraf/spyd
from spyd.registry_manager import register
from spyd.utils.dictionary_get import dictget
@register('client_message_handler')
class EditentHandler(object):
message_type = 'N_EDITENT'
@staticmethod
def handle(client, room, message):
player = client.get_player()
entity_id = message['entid']
entity_type = message['type']
x, y, z = dictget(message, 'x', 'y', 'z')
attrs = message['attrs']
room.handle_player_event('edit_entity', player, entity_id, entity_type, x, y, z, attrs)
| from spyd.registry_manager import register
from spyd.utils.dictionary_get import dictget
@register('client_message_handler')
class EditentHandler(object):
message_type = 'N_EDITENT'
@staticmethod
def handle(client, room, message):
player = client.get_player()
entity_id = message['entid']
entity_type = message['type']
x, y, z = dictget(message, 'x', 'y', 'z')
attrs = message['attrs']
room.handle_player_event('edit_entity', player, entity_id, entity_type, x, y, z, attrs) | none | 1 | 2.052777 | 2 | |
tests/unit/test_vector_mcmc.py | SauravMaheshkar/SMCPy | 57 | 6612266 | <gh_stars>10-100
import numpy as np
import pytest
import sys
from smcpy.mcmc.vector_mcmc import *
@pytest.fixture
def stub_model():
x = np.array([1, 2, 3])
def evaluation(q):
assert q.shape[1] == 3
output = (q[:, 0, None] * 2 + 3.25 * q[:, 1, None] - \
q[:, 2, None] ** 2) * x
return output
return evaluation
@pytest.fixture
def data():
return np.array([5, 4, 9])
@pytest.fixture
def priors(mocker):
priors = [mocker.Mock() for _ in range(3)]
for i, p in enumerate(priors):
p.rvs = lambda x, i=i: np.array([1 + i] * x)
p.pdf = lambda x, i=i: x + i
delattr(p, 'dim')
multivar_prior = mocker.Mock()
multivar_prior.rvs = lambda x: np.tile([4, 5, 6], (x, 1))
multivar_prior.pdf = lambda x: np.sum(x - 0.5, axis=1)
multivar_prior.dim = 3
priors.append(multivar_prior)
return priors
@pytest.fixture
def vector_mcmc(stub_model, data, priors):
return VectorMCMC(stub_model, data, priors, log_like_args=None)
@pytest.mark.parametrize('num_samples', [1, 2, 4])
def test_vectorized_prior_sampling(vector_mcmc, num_samples):
prior_samples = vector_mcmc.sample_from_priors(num_samples=num_samples)
expected_samples = np.tile([1, 2, 3, 4, 5, 6], (num_samples, 1))
np.testing.assert_array_equal(prior_samples, expected_samples)
@pytest.mark.parametrize('inputs', (np.array([[0.1, 1, 0.5, 3, 2, 1]]),
np.array([[0.1, 1, 0.5, 3, 2, 1]] * 4)))
def test_vectorized_prior(vector_mcmc, inputs):
log_prior = vector_mcmc.evaluate_log_priors(inputs)
expected_prior = np.array([[0.1, 2, 2.5, 4.5]] * inputs.shape[0])
np.testing.assert_array_almost_equal(log_prior, np.log(expected_prior))
@pytest.mark.parametrize('inputs', (np.array([[0.1, 0.5]]),
np.array([[0.1, 1, 1, 0.5]] * 4)))
def test_prior_input_mismatch_throws_error(vector_mcmc, inputs):
with pytest.raises(ValueError):
vector_mcmc.evaluate_log_priors(inputs)
@pytest.mark.parametrize('inputs, std_dev', (
    ((np.array([[0, 1, 0.5]]), 1/np.sqrt(2 * np.pi)),
     (np.array([[0, 1, 0.5]] * 4), 1/np.sqrt(2 * np.pi)),
     (np.array([[0, 1, 0.5, 1/np.sqrt(2 * np.pi)]] * 4), None)
    )))
def test_vectorized_default_likelihood(vector_mcmc, inputs, std_dev):
    """Gaussian likelihood; std dev comes from args or the extra input column."""
    vector_mcmc._log_like_func._args = std_dev
    n_rows = inputs.shape[0]
    expected_log_like = np.log(np.full((n_rows, 1), np.exp(-8 * np.pi)))
    computed = vector_mcmc.evaluate_log_likelihood(inputs)
    np.testing.assert_array_almost_equal(computed, expected_log_like)
@pytest.mark.parametrize('inputs', (np.array([[0, 1, 0.5]]),
                                    np.array([[0, 1, 0.5]] * 4)))
def test_vectorized_proposal(vector_mcmc, inputs, mocker):
    """Proposal adds an MVN perturbation drawn with zero mean and given cov."""
    mvn_mock = mocker.patch('numpy.random.multivariate_normal',
                            return_value=np.ones(inputs.shape))
    n_cols = inputs.shape[1]
    cov = np.eye(n_cols)
    proposed = vector_mcmc.proposal(inputs, cov=cov)
    np.testing.assert_array_equal(proposed, inputs + 1)
    positional_args = mvn_mock.call_args[0]
    np.testing.assert_array_equal(positional_args[0], np.zeros(n_cols))
    # Covariance is passed through unscaled (scale factor of 1).
    np.testing.assert_array_equal(positional_args[1], cov)
    np.testing.assert_array_equal(positional_args[2], inputs.shape[0])
@pytest.mark.parametrize('new_inputs, old_inputs', (
    (np.array([[1, 1, 1]]), np.array([[2, 2, 2]])),
    (np.array([[1, 1, 1]] * 4), np.array([[2, 2, 2]] * 4))))
def test_vectorized_accept_ratio(vector_mcmc, new_inputs, old_inputs, mocker):
    """Ratio is exp((new_like + new_priors) - (old_like + old_priors)) per row."""
    n_rows = new_inputs.shape[0]
    new_log_like = np.ones([n_rows, 1])
    old_log_like = np.ones([n_rows, 1])
    ratio = vector_mcmc.acceptance_ratio(new_log_like, old_log_like,
                                         new_inputs, old_inputs)
    # (1 + 1 + 1 + 1) - (1 + 2 + 2 + 2) = -3 in log space.
    np.testing.assert_array_equal(ratio, np.exp(np.full((n_rows, 1), -3.)))
@pytest.mark.parametrize('new_inputs, old_inputs', (
    (np.array([[1, 1, 1]]), np.array([[2, 2, 2]])),
    (np.array([[1, 1, 1]] * 4), np.array([[2, 2, 2]] * 4))))
def test_vectorized_selection(vector_mcmc, new_inputs, old_inputs, mocker):
    """Rows whose acceptance ratio exceeds the uniform draw take new inputs."""
    n_rows = new_inputs.shape[0]
    uniform_draws = np.full([n_rows, 1], 0.5)
    ratios = np.c_[[0.25, 1.2, 0.25, 0.75]][:n_rows]
    selected = vector_mcmc.selection(new_inputs, old_inputs, ratios,
                                     uniform_draws)
    expected = np.array([[2, 2, 2], [1, 1, 1], [2, 2, 2], [1, 1, 1]])[:n_rows]
    np.testing.assert_array_equal(selected, expected)
@pytest.mark.parametrize('adapt_interval,adapt_delay,adapt',
        [(3, 0, False), (4, 0, True), (8, 0, True), (11, 0, False),
         (3, 5, True), (4, 5, False), (8, 5, False), (2, 1, False),
         (3, 2, True), (None, 1, False), (2, 8, True)])
def test_vectorized_proposal_adaptation(vector_mcmc, adapt_interval, adapt,
                                        adapt_delay, mocker):
    """Covariance is recomputed only when sample_count hits an adapt window."""
    parallel_chains = 3
    sample_count = 8
    total_samples = sample_count + 1
    old_cov = np.eye(2)
    chain = np.zeros([parallel_chains, 2, total_samples])
    # np.cov is patched so that adaptation (if triggered) returns 2 * I.
    cov_mock = mocker.patch('numpy.cov', return_value=np.eye(2) * 2)
    cov = vector_mcmc.adapt_proposal_cov(old_cov, chain, sample_count,
                                         adapt_interval, adapt_delay)
    expected_cov = old_cov
    if adapt:
        expected_cov = np.eye(2) * 2
        # Number of trailing samples that should feed the covariance update.
        if sample_count > adapt_delay:
            n_samples_for_cov_calc = (sample_count - adapt_delay)
        else:
            n_samples_for_cov_calc = adapt_interval
        # Chains are flattened side by side into a (dims, samples) array.
        expected_call = np.zeros((2, parallel_chains * n_samples_for_cov_calc))
        np.testing.assert_array_equal(cov_mock.call_args[0][0], expected_call)
    np.testing.assert_array_equal(cov, expected_cov)
@pytest.mark.parametrize('phi', (0.5, 1))
@pytest.mark.parametrize('num_samples', (1, 2))
def test_vectorized_smc_metropolis(vector_mcmc, phi, num_samples, mocker):
    """smc_metropolis returns the proposal after num_samples accepted steps."""
    inputs = np.ones([10, 3])
    cov = np.eye(3)
    vector_mcmc._std_dev = 1
    vector_mcmc._priors = vector_mcmc._priors[:3] # drop mvn
    mocker.patch('numpy.random.uniform')
    mocker.patch.object(vector_mcmc, 'acceptance_ratio', return_value=inputs)
    # Each proposal shifts inputs by +1; selection passes proposals through.
    mocker.patch.object(vector_mcmc, 'proposal',
                        side_effect=[inputs + 1, inputs + 2])
    mocker.patch.object(vector_mcmc, 'selection',
                        new=lambda new_log_like, x, y, z: new_log_like)
    log_like = mocker.patch.object(vector_mcmc, 'evaluate_log_likelihood',
                                   return_value=inputs[:, 0].reshape(-1, 1) * 2)
    new_inputs, new_log_likes = vector_mcmc.smc_metropolis(inputs, num_samples,
                                                           cov, phi)
    expected_new_inputs = inputs + num_samples
    expected_new_log_likes = inputs[:, 0].reshape(-1, 1) * 2
    np.testing.assert_array_equal(new_inputs, expected_new_inputs)
    np.testing.assert_array_equal(new_log_likes, expected_new_log_likes)
    # One initial likelihood evaluation plus one per Metropolis step.
    assert log_like.call_count == num_samples + 1
@pytest.mark.parametrize('num_samples', (1, 5, 50))
def test_vectorized_metropolis(vector_mcmc, num_samples, mocker):
    """Chain stores the start plus one accepted (+1-shifted) state per step."""
    inputs = np.ones([10, 3])
    cov = np.eye(3)
    vector_mcmc._std_dev = 1
    adapt_delay = 0
    adapt_interval = 1
    vector_mcmc._priors = vector_mcmc._priors[:3] # drop mvn
    mocker.patch('numpy.random.uniform')
    mocker.patch.object(vector_mcmc, 'acceptance_ratio', return_value=inputs)
    # Each proposal shifts the previous state by exactly +1.
    mocker.patch.object(vector_mcmc, 'proposal',
                side_effect=[inputs + i for i in range(1, num_samples + 1)])
    mocker.patch.object(vector_mcmc, 'selection',
                        new=lambda new_log_like, x, y, z: new_log_like)
    log_like = mocker.patch.object(vector_mcmc, 'evaluate_log_likelihood',
                                   return_value=np.zeros((inputs.shape[0], 1)))
    adapt = mocker.patch.object(vector_mcmc, 'adapt_proposal_cov')
    expected_chain = np.zeros([10, 3, num_samples + 1])
    expected_chain[:, :, 0] = inputs
    for i in range(num_samples):
        expected_chain[:, :, i + 1] = expected_chain[:, :, i].copy() + 1
    chain = vector_mcmc.metropolis(inputs, num_samples, cov, adapt_interval,
                                   adapt_delay)
    np.testing.assert_array_equal(chain, expected_chain)
    # Initial evaluation plus one per step; adaptation attempted every step.
    assert log_like.call_count == num_samples + 1
    assert adapt.call_count == num_samples
@pytest.mark.parametrize('num_chains', (1, 3, 5))
@pytest.mark.parametrize('method', ('smc_metropolis', 'metropolis'))
def test_metropolis_inputs_out_of_bounds(mocker, stub_model, data, priors,
                                         num_chains, method):
    """Both samplers must raise when every starting input has zero prior mass.

    Bug fix: 'priors' previously referred to the module-level pytest fixture
    *function* (never injected), so VectorMCMC received a callable instead of
    a list of prior distributions. Requesting the fixture as a parameter
    supplies the real mocked priors.
    """
    vmcmc = VectorMCMC(stub_model, data, priors, log_like_args=1)
    mocker.patch.object(vmcmc, 'evaluate_log_priors',
                        return_value=np.ones((num_chains, 1)) * -np.inf)
    with pytest.raises(ValueError):
        # getattr is the idiomatic way to dispatch on a method name string.
        getattr(vmcmc, method)(np.ones((1, 3)), num_samples=0, cov=None,
                               phi=None)
def test_metropolis_no_like_calc_if_zero_prior_prob(mocker, data):
    """Model/likelihood should only be evaluated for rows with finite priors."""
    # First model call: the 5 initial rows; second call: only the 3 proposed
    # rows whose priors stayed finite.
    mocked_model = mocker.Mock(side_effect=[np.ones((5, 3)),
                                            np.tile(data, (3, 1))])
    input_params = np.ones((5,3)) * 0.1
    some_zero_priors = np.ones((5, 3)) * 2
    some_zero_priors[1, 0] = -np.inf
    some_zero_priors[4, 2] = -np.inf
    mocked_proposal = np.ones((5, 3)) * 0.2
    mocked_proposal[2, 0] = 0.3
    vmcmc = VectorMCMC(mocked_model, data, priors=None, log_like_args=1)
    mocker.patch.object(vmcmc, '_check_log_priors_for_zero_probability')
    mocker.patch.object(vmcmc, 'proposal', return_value=mocked_proposal)
    mocker.patch.object(vmcmc, 'evaluate_log_priors',
                        side_effect=[np.ones((5, 3)), some_zero_priors])
    expected_chain = np.zeros((5, 3, 2))
    expected_chain[:, :, 0] = input_params
    expected_chain[:, :, 1] = mocked_proposal
    # Rows 1 and 4 had -inf priors, so they keep their previous state.
    expected_chain[1, :, 1] = expected_chain[1, :, 0]
    # NOTE(review): the index [1, :, 0] below was presumably meant to be
    # [4, :, 0]; harmless here because every starting row is identical.
    expected_chain[4, :, 1] = expected_chain[1, :, 0]
    chain = vmcmc.metropolis(input_params, num_samples=1, cov=np.eye(3))
    np.testing.assert_array_equal(mocked_model.call_args[0][0],
                                  mocked_proposal[[0, 2, 3]])
    np.testing.assert_array_equal(chain, expected_chain)
| import numpy as np
import pytest
import sys
from smcpy.mcmc.vector_mcmc import *
@pytest.fixture
def stub_model():
x = np.array([1, 2, 3])
def evaluation(q):
assert q.shape[1] == 3
output = (q[:, 0, None] * 2 + 3.25 * q[:, 1, None] - \
q[:, 2, None] ** 2) * x
return output
return evaluation
@pytest.fixture
def data():
return np.array([5, 4, 9])
@pytest.fixture
def priors(mocker):
priors = [mocker.Mock() for _ in range(3)]
for i, p in enumerate(priors):
p.rvs = lambda x, i=i: np.array([1 + i] * x)
p.pdf = lambda x, i=i: x + i
delattr(p, 'dim')
multivar_prior = mocker.Mock()
multivar_prior.rvs = lambda x: np.tile([4, 5, 6], (x, 1))
multivar_prior.pdf = lambda x: np.sum(x - 0.5, axis=1)
multivar_prior.dim = 3
priors.append(multivar_prior)
return priors
@pytest.fixture
def vector_mcmc(stub_model, data, priors):
return VectorMCMC(stub_model, data, priors, log_like_args=None)
@pytest.mark.parametrize('num_samples', [1, 2, 4])
def test_vectorized_prior_sampling(vector_mcmc, num_samples):
prior_samples = vector_mcmc.sample_from_priors(num_samples=num_samples)
expected_samples = np.tile([1, 2, 3, 4, 5, 6], (num_samples, 1))
np.testing.assert_array_equal(prior_samples, expected_samples)
@pytest.mark.parametrize('inputs', (np.array([[0.1, 1, 0.5, 3, 2, 1]]),
np.array([[0.1, 1, 0.5, 3, 2, 1]] * 4)))
def test_vectorized_prior(vector_mcmc, inputs):
log_prior = vector_mcmc.evaluate_log_priors(inputs)
expected_prior = np.array([[0.1, 2, 2.5, 4.5]] * inputs.shape[0])
np.testing.assert_array_almost_equal(log_prior, np.log(expected_prior))
@pytest.mark.parametrize('inputs', (np.array([[0.1, 0.5]]),
np.array([[0.1, 1, 1, 0.5]] * 4)))
def test_prior_input_mismatch_throws_error(vector_mcmc, inputs):
with pytest.raises(ValueError):
vector_mcmc.evaluate_log_priors(inputs)
@pytest.mark.parametrize('inputs, std_dev', (
((np.array([[0, 1, 0.5]]), 1/np.sqrt(2 * np.pi)),
(np.array([[0, 1, 0.5]] * 4), 1/np.sqrt(2 * np.pi)),
(np.array([[0, 1, 0.5, 1/np.sqrt(2 * np.pi)]] * 4), None)
)))
def test_vectorized_default_likelihood(vector_mcmc, inputs, std_dev):
vector_mcmc._log_like_func._args = std_dev
expected_like = np.array(inputs.shape[0] * [[np.exp(-8 * np.pi)]])
expected_log_like = np.log(expected_like)
log_like = vector_mcmc.evaluate_log_likelihood(inputs)
np.testing.assert_array_almost_equal(log_like, expected_log_like)
@pytest.mark.parametrize('inputs', (np.array([[0, 1, 0.5]]),
np.array([[0, 1, 0.5]] * 4)))
def test_vectorized_proposal(vector_mcmc, inputs, mocker):
mvn_mock = mocker.patch('numpy.random.multivariate_normal',
return_value=np.ones(inputs.shape))
cov = np.eye(inputs.shape[1])
cov_scale = 1 #2.38 ** 2 / inputs.shape[1]
expected = inputs + 1
inputs_new = vector_mcmc.proposal(inputs, cov=cov)
calls = mvn_mock.call_args[0]
np.testing.assert_array_equal(inputs_new, expected)
np.testing.assert_array_equal(calls[0], np.zeros(cov.shape[0]))
np.testing.assert_array_equal(calls[1], cov_scale * cov)
np.testing.assert_array_equal(calls[2], inputs.shape[0])
@pytest.mark.parametrize('new_inputs, old_inputs', (
(np.array([[1, 1, 1]]), np.array([[2, 2, 2]])),
(np.array([[1, 1, 1]] * 4), np.array([[2, 2, 2]] * 4))))
def test_vectorized_accept_ratio(vector_mcmc, new_inputs, old_inputs, mocker):
mocked_new_log_likelihood = np.ones([new_inputs.shape[0], 1])
mocked_old_log_likelihood = np.ones([new_inputs.shape[0], 1])
mocked_new_log_priors = new_inputs
mocked_old_log_priors = old_inputs
accpt_ratio = vector_mcmc.acceptance_ratio(mocked_new_log_likelihood,
mocked_old_log_likelihood,
mocked_new_log_priors,
mocked_old_log_priors)
expected = np.exp(np.array([[4 - 7.]] * new_inputs.shape[0]))
np.testing.assert_array_equal(accpt_ratio, expected)
@pytest.mark.parametrize('new_inputs, old_inputs', (
(np.array([[1, 1, 1]]), np.array([[2, 2, 2]])),
(np.array([[1, 1, 1]] * 4), np.array([[2, 2, 2]] * 4))))
def test_vectorized_selection(vector_mcmc, new_inputs, old_inputs, mocker):
mocked_uniform_samples = np.ones([new_inputs.shape[0], 1]) * 0.5
acceptance_ratios = np.c_[[0.25, 1.2, 0.25, 0.75]][:new_inputs.shape[0]]
accepted_inputs = vector_mcmc.selection(new_inputs, old_inputs,
acceptance_ratios,
mocked_uniform_samples)
expected = np.array([[2, 2, 2], [1, 1, 1], [2, 2, 2], [1, 1, 1]])
np.testing.assert_array_equal(accepted_inputs,
expected[:new_inputs.shape[0]])
@pytest.mark.parametrize('adapt_interval,adapt_delay,adapt',
[(3, 0, False), (4, 0, True), (8, 0, True), (11, 0, False),
(3, 5, True), (4, 5, False), (8, 5, False), (2, 1, False),
(3, 2, True), (None, 1, False), (2, 8, True)])
def test_vectorized_proposal_adaptation(vector_mcmc, adapt_interval, adapt,
adapt_delay, mocker):
parallel_chains = 3
sample_count = 8
total_samples = sample_count + 1
old_cov = np.eye(2)
chain = np.zeros([parallel_chains, 2, total_samples])
cov_mock = mocker.patch('numpy.cov', return_value=np.eye(2) * 2)
cov = vector_mcmc.adapt_proposal_cov(old_cov, chain, sample_count,
adapt_interval, adapt_delay)
expected_cov = old_cov
if adapt:
expected_cov = np.eye(2) * 2
if sample_count > adapt_delay:
n_samples_for_cov_calc = (sample_count - adapt_delay)
else:
n_samples_for_cov_calc = adapt_interval
expected_call = np.zeros((2, parallel_chains * n_samples_for_cov_calc))
np.testing.assert_array_equal(cov_mock.call_args[0][0], expected_call)
np.testing.assert_array_equal(cov, expected_cov)
@pytest.mark.parametrize('phi', (0.5, 1))
@pytest.mark.parametrize('num_samples', (1, 2))
def test_vectorized_smc_metropolis(vector_mcmc, phi, num_samples, mocker):
inputs = np.ones([10, 3])
cov = np.eye(3)
vector_mcmc._std_dev = 1
vector_mcmc._priors = vector_mcmc._priors[:3] # drop mvn
mocker.patch('numpy.random.uniform')
mocker.patch.object(vector_mcmc, 'acceptance_ratio', return_value=inputs)
mocker.patch.object(vector_mcmc, 'proposal',
side_effect=[inputs + 1, inputs + 2])
mocker.patch.object(vector_mcmc, 'selection',
new=lambda new_log_like, x, y, z: new_log_like)
log_like = mocker.patch.object(vector_mcmc, 'evaluate_log_likelihood',
return_value=inputs[:, 0].reshape(-1, 1) * 2)
new_inputs, new_log_likes = vector_mcmc.smc_metropolis(inputs, num_samples,
cov, phi)
expected_new_inputs = inputs + num_samples
expected_new_log_likes = inputs[:, 0].reshape(-1, 1) * 2
np.testing.assert_array_equal(new_inputs, expected_new_inputs)
np.testing.assert_array_equal(new_log_likes, expected_new_log_likes)
assert log_like.call_count == num_samples + 1
@pytest.mark.parametrize('num_samples', (1, 5, 50))
def test_vectorized_metropolis(vector_mcmc, num_samples, mocker):
inputs = np.ones([10, 3])
cov = np.eye(3)
vector_mcmc._std_dev = 1
adapt_delay = 0
adapt_interval = 1
vector_mcmc._priors = vector_mcmc._priors[:3] # drop mvn
mocker.patch('numpy.random.uniform')
mocker.patch.object(vector_mcmc, 'acceptance_ratio', return_value=inputs)
mocker.patch.object(vector_mcmc, 'proposal',
side_effect=[inputs + i for i in range(1, num_samples + 1)])
mocker.patch.object(vector_mcmc, 'selection',
new=lambda new_log_like, x, y, z: new_log_like)
log_like = mocker.patch.object(vector_mcmc, 'evaluate_log_likelihood',
return_value=np.zeros((inputs.shape[0], 1)))
adapt = mocker.patch.object(vector_mcmc, 'adapt_proposal_cov')
expected_chain = np.zeros([10, 3, num_samples + 1])
expected_chain[:, :, 0] = inputs
for i in range(num_samples):
expected_chain[:, :, i + 1] = expected_chain[:, :, i].copy() + 1
chain = vector_mcmc.metropolis(inputs, num_samples, cov, adapt_interval,
adapt_delay)
np.testing.assert_array_equal(chain, expected_chain)
assert log_like.call_count == num_samples + 1
assert adapt.call_count == num_samples
@pytest.mark.parametrize('num_chains', (1, 3, 5))
@pytest.mark.parametrize('method', ('smc_metropolis', 'metropolis'))
def test_metropolis_inputs_out_of_bounds(mocker, stub_model, data, num_chains,
method):
vmcmc = VectorMCMC(stub_model, data, priors, log_like_args=1)
mocker.patch.object(vmcmc, 'evaluate_log_priors',
return_value=np.ones((num_chains, 1)) * -np.inf)
with pytest.raises(ValueError):
vmcmc.__getattribute__(method)(np.ones((1, 3)), num_samples=0, cov=None,
phi=None)
def test_metropolis_no_like_calc_if_zero_prior_prob(mocker, data):
mocked_model = mocker.Mock(side_effect=[np.ones((5, 3)),
np.tile(data, (3, 1))])
input_params = np.ones((5,3)) * 0.1
some_zero_priors = np.ones((5, 3)) * 2
some_zero_priors[1, 0] = -np.inf
some_zero_priors[4, 2] = -np.inf
mocked_proposal = np.ones((5, 3)) * 0.2
mocked_proposal[2, 0] = 0.3
vmcmc = VectorMCMC(mocked_model, data, priors=None, log_like_args=1)
mocker.patch.object(vmcmc, '_check_log_priors_for_zero_probability')
mocker.patch.object(vmcmc, 'proposal', return_value=mocked_proposal)
mocker.patch.object(vmcmc, 'evaluate_log_priors',
side_effect=[np.ones((5, 3)), some_zero_priors])
expected_chain = np.zeros((5, 3, 2))
expected_chain[:, :, 0] = input_params
expected_chain[:, :, 1] = mocked_proposal
expected_chain[1, :, 1] = expected_chain[1, :, 0]
expected_chain[4, :, 1] = expected_chain[1, :, 0]
chain = vmcmc.metropolis(input_params, num_samples=1, cov=np.eye(3))
np.testing.assert_array_equal(mocked_model.call_args[0][0],
mocked_proposal[[0, 2, 3]])
np.testing.assert_array_equal(chain, expected_chain) | en | 0.582406 | #2.38 ** 2 / inputs.shape[1] # drop mvn # drop mvn | 2.187931 | 2 |
tests/UART.py | ni/NI-ELVIS-III-Python-Examples | 3 | 6612267 | """
Use GY25 chip.
Hardware setup:
1. Connect connector A UART.RX (DIO16) to UART.TX of a device.
2. Connect connector A UART.TX (DIO17) to UART.RX of a device.
"""
import time
import unittest
from nielvis import UART, Bank, UARTBaudRate, UARTDataBits, UARTStopBits, UARTParity
# Shared UART session configuration: connector bank A, 115200 baud, 8-N-1.
bank = Bank.A
baud_rate = UARTBaudRate.RATE115200
data_bits = UARTDataBits.BITS8
stop_bits = UARTStopBits.ONE
parity = UARTParity.NO
class Test_UART(unittest.TestCase):
    """Hardware loopback test for the ELVIS III UART session (GY25 attached)."""
    # NOTE(review): classmethods conventionally name the first argument 'cls'.
    @classmethod
    def setUpClass(self):
        # Open one shared UART session for all tests in this class.
        self.uart = UART(bank,baud_rate, data_bits, stop_bits, parity)
    @classmethod
    def tearDownClass(self):
        # Release the hardware session when the class finishes.
        self.uart.close()
    def test_WriteAndRead1Byte_ReturnExpectedValue(self):
        # GY25 protocol: 0xA5 0x51 requests a reading; device acks with 0xAA.
        self.uart.write(b'\xA5')
        self.uart.write(b'\x51')
        time.sleep(0.1)
        bytes_to_read = 1
        return_value = self.uart.read(bytes_to_read)
        self.assertEqual(return_value, b'\xAA')
| """
Use GY25 chip.
Hardware setup:
1. Connect connector A UART.RX (DIO16) to UART.TX of a device.
2. Connect connector A UART.TX (DIO17) to UART.RX of a device.
"""
import time
import unittest
from nielvis import UART, Bank, UARTBaudRate, UARTDataBits, UARTStopBits, UARTParity
bank = Bank.A
baud_rate = UARTBaudRate.RATE115200
data_bits = UARTDataBits.BITS8
stop_bits = UARTStopBits.ONE
parity = UARTParity.NO
class Test_UART(unittest.TestCase):
@classmethod
def setUpClass(self):
self.uart = UART(bank,baud_rate, data_bits, stop_bits, parity)
@classmethod
def tearDownClass(self):
self.uart.close()
def test_WriteAndRead1Byte_ReturnExpectedValue(self):
self.uart.write(b'\xA5')
self.uart.write(b'\x51')
time.sleep(0.1)
bytes_to_read = 1
return_value = self.uart.read(bytes_to_read)
self.assertEqual(return_value, b'\xAA')
| en | 0.609713 | Use GY25 chip. Hardware setup: 1. Connect connector A UART.RX (DIO16) to UART.TX of a device. 2. Connect connector A UART.TX (DIO17) to UART.RX of a device. | 2.885855 | 3 |
pytelebirr/__init__.py | TeleBirrApi/PyTeleBirr | 30 | 6612268 | from .pytelebirr import PyTeleBirr
| from .pytelebirr import PyTeleBirr
| none | 1 | 0.977711 | 1 | |
releng/tests/test_models.py | VanirLab/VOS | 0 | 6612269 | <gh_stars>0
from django.test import TestCase
from releng.models import Release
class RelengTest(TestCase):
    """Smoke tests for the Release model: URLs, feed, sitemap, ISO paths."""
    fixtures = ['releng/fixtures/release.json']
    def setUp(self):
        # Fixture loads at least one Release; use the first for all tests.
        self.release = Release.objects.first()
    def test_feed(self):
        response = self.client.get('/feeds/releases/')
        self.assertEqual(response.status_code, 200)
    def test_absolute_url(self):
        # The version string must appear in the release's canonical URL.
        self.assertIn(self.release.version, self.release.get_absolute_url())
    def test_iso_url(self):
        url = self.release.iso_url()
        ver = self.release.version
        expected = 'iso/{}/archlinux-{}-x86_64.iso'.format(ver, ver)
        self.assertEqual(url, expected)
    def test_info_html(self):
        # Rendered HTML should still contain the raw info text.
        self.assertIn(self.release.info, self.release.info_html())
    def test_dir_path(self):
        dir_path = u'iso/{}/'.format(self.release.version)
        self.assertEqual(dir_path, self.release.dir_path())
    def test_sitemap(self):
        response = self.client.get('/sitemap-releases.xml')
        self.assertEqual(response.status_code, 200)
| from django.test import TestCase
from releng.models import Release
class RelengTest(TestCase):
fixtures = ['releng/fixtures/release.json']
def setUp(self):
self.release = Release.objects.first()
def test_feed(self):
response = self.client.get('/feeds/releases/')
self.assertEqual(response.status_code, 200)
def test_absolute_url(self):
self.assertIn(self.release.version, self.release.get_absolute_url())
def test_iso_url(self):
url = self.release.iso_url()
ver = self.release.version
expected = 'iso/{}/archlinux-{}-x86_64.iso'.format(ver, ver)
self.assertEqual(url, expected)
def test_info_html(self):
self.assertIn(self.release.info, self.release.info_html())
def test_dir_path(self):
dir_path = u'iso/{}/'.format(self.release.version)
self.assertEqual(dir_path, self.release.dir_path())
def test_sitemap(self):
response = self.client.get('/sitemap-releases.xml')
self.assertEqual(response.status_code, 200) | none | 1 | 2.170761 | 2 | |
{{cookiecutter.project_name}}/server/apps/main/migrations/0001_initial.py | oxyum/wemake-django-template | 1,489 | 6612270 | <filename>{{cookiecutter.project_name}}/server/apps/main/migrations/0001_initial.py
# Generated by Django 2.2.7 on 2019-11-24 11:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration that creates the example BlogPost model."""
    # First migration in the app: no earlier migration to depend on.
    initial = True
    dependencies = []
    operations = [
        migrations.CreateModel(
            name='BlogPost',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name='ID',
                    ),
                ),
                ('title', models.CharField(max_length=80)),
                ('body', models.TextField()),
                # Timestamps are managed automatically on insert/update.
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
            options={
                'verbose_name': 'BlogPost',
                'verbose_name_plural': 'BlogPosts',
            },
        ),
    ]
| <filename>{{cookiecutter.project_name}}/server/apps/main/migrations/0001_initial.py
# Generated by Django 2.2.7 on 2019-11-24 11:01
from django.db import migrations, models
class Migration(migrations.Migration):
"""Initial migration that creates the example BlogPost model."""
initial = True
dependencies = []
operations = [
migrations.CreateModel(
name='BlogPost',
fields=[
(
'id',
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID',
),
),
('title', models.CharField(max_length=80)),
('body', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': 'BlogPost',
'verbose_name_plural': 'BlogPosts',
},
),
]
| en | 0.777946 | # Generated by Django 2.2.7 on 2019-11-24 11:01 Initial migration that creates the example BlogPost model. | 1.758677 | 2 |
snpdb/management/commands/fix_vcf_cohort_mismatch.py | SACGF/variantgrid | 5 | 6612271 | <reponame>SACGF/variantgrid<gh_stars>1-10
import logging
import os
import cyvcf2
from django.core.management import BaseCommand
from snpdb.models import CohortGenotypeCollection
from upload.models import UploadedVCF
class Command(BaseCommand):
    """Report CohortGenotype packed-sample-order mismatches against source VCFs.

    Read-only diagnostic: logs/prints discrepancies, never modifies data.
    """
    def handle(self, *args, **options):
        # Only collections whose cohort is backed by an actual VCF record.
        for cgc in CohortGenotypeCollection.objects.filter(cohort__vcf__isnull=False):
            cohort = cgc.cohort
            vcf = cohort.vcf
            if not vcf.has_genotype:
                continue # Will only have 1 sample
            try:
                filename = vcf.uploadedvcf.uploaded_file.get_filename()
            except UploadedVCF.DoesNotExist:
                logging.info("VCF %d missing uploadedvcf", vcf.pk)
                continue
            if os.path.exists(filename):
                # Map VCF column names back to their CohortSample records.
                cohort_samples_by_vcf_sample_name = {cs.sample.vcf_sample_name: cs for cs in
                                                     cohort.cohortsample_set.all()}
                reader = cyvcf2.VCF(filename)
                num_vcf_samples = len(reader.samples)
                # Any one CohortGenotype row reveals the packed sample count.
                if cg := cgc.cohortgenotype_set.first():
                    num_cgc_samples = len(cg.samples_zygosity)
                    if num_cgc_samples != num_vcf_samples:
                        logging.warning("VCF %d, VCF samples: %d, CohortGenotype samples: %d",
                                        vcf.pk, num_vcf_samples, num_cgc_samples)
                for i, sample_name in enumerate(reader.samples):
                    if cs := cohort_samples_by_vcf_sample_name.get(sample_name):
                        # The packed index should equal the VCF column position.
                        if cs.cohort_genotype_packed_field_index != i:
                            print(f"{sample_name} {cs.cohort_genotype_packed_field_index} -> {i}")
                    else:
                        logging.warning("VCF %d, sample_name '%s' deleted", vcf.pk, sample_name)
            else:
                logging.warning("VCF %d, filename '%s' not found", vcf.pk, filename)
| import logging
import os
import cyvcf2
from django.core.management import BaseCommand
from snpdb.models import CohortGenotypeCollection
from upload.models import UploadedVCF
class Command(BaseCommand):
def handle(self, *args, **options):
for cgc in CohortGenotypeCollection.objects.filter(cohort__vcf__isnull=False):
cohort = cgc.cohort
vcf = cohort.vcf
if not vcf.has_genotype:
continue # Will only have 1 sample
try:
filename = vcf.uploadedvcf.uploaded_file.get_filename()
except UploadedVCF.DoesNotExist:
logging.info("VCF %d missing uploadedvcf", vcf.pk)
continue
if os.path.exists(filename):
cohort_samples_by_vcf_sample_name = {cs.sample.vcf_sample_name: cs for cs in
cohort.cohortsample_set.all()}
reader = cyvcf2.VCF(filename)
num_vcf_samples = len(reader.samples)
if cg := cgc.cohortgenotype_set.first():
num_cgc_samples = len(cg.samples_zygosity)
if num_cgc_samples != num_vcf_samples:
logging.warning("VCF %d, VCF samples: %d, CohortGenotype samples: %d",
vcf.pk, num_vcf_samples, num_cgc_samples)
for i, sample_name in enumerate(reader.samples):
if cs := cohort_samples_by_vcf_sample_name.get(sample_name):
if cs.cohort_genotype_packed_field_index != i:
print(f"{sample_name} {cs.cohort_genotype_packed_field_index} -> {i}")
else:
logging.warning("VCF %d, sample_name '%s' deleted", vcf.pk, sample_name)
else:
logging.warning("VCF %d, filename '%s' not found", vcf.pk, filename) | en | 0.977625 | # Will only have 1 sample | 2.212806 | 2 |
main.py | fangwangme/ip_checker | 0 | 6612272 | <filename>main.py
#! /usr/bin/python3
# coding:utf-8
"""
@author:<NAME>
@date:2021-09-21
@desc:
"""
import json
from flask import Flask, request, jsonify
from logger import logger
app = Flask(__name__)
@app.route("/ip")
def check_ip():
    """Return the caller's IP address as JSON: {"ip": "<addr>"}."""
    request_ip = request.remote_addr
    # Log caller IP and all request headers to help debug proxy setups.
    logger.info("Got a request from {} with headers {}".format(request_ip, json.dumps(dict(request.headers))))
    return jsonify({"ip": request_ip})
@app.route("/")
def hello_world():
    """Root endpoint: logs the request and returns a static greeting."""
    request_ip = request.remote_addr
    # Same request logging as the /ip endpoint.
    logger.info("Got a request from {} with headers {}".format(request_ip, json.dumps(dict(request.headers))))
    return "<p>Hello, World!</p>"
if __name__ == "__main__":
    # Flask development server; binds all interfaces on port 5000.
    # Use a production WSGI server for real deployments.
    app.run(host="0.0.0.0", port=5000, debug=False)
| <filename>main.py
#! /usr/bin/python3
# coding:utf-8
"""
@author:<NAME>
@date:2021-09-21
@desc:
"""
import json
from flask import Flask, request, jsonify
from logger import logger
app = Flask(__name__)
@app.route("/ip")
def check_ip():
request_ip = request.remote_addr
logger.info("Got a request from {} with headers {}".format(request_ip, json.dumps(dict(request.headers))))
return jsonify({"ip": request_ip})
@app.route("/")
def hello_world():
request_ip = request.remote_addr
logger.info("Got a request from {} with headers {}".format(request_ip, json.dumps(dict(request.headers))))
return "<p>Hello, World!</p>"
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000, debug=False)
| en | 0.452204 | #! /usr/bin/python3 # coding:utf-8 @author:<NAME> @date:2021-09-21 @desc: | 3.112752 | 3 |
DES_weather_analysis/clustring_kmediod_operation.py | zahraghh/DES_weather_analysis | 0 | 6612273 | import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
import matplotlib
import datetime
import sklearn.datasets, sklearn.decomposition
from sklearn.cluster import KMeans
from sklearn_extra.cluster import KMedoids
from sklearn.preprocessing import StandardScaler
import sklearn_extra
from scipy import stats
from scipy.stats import kurtosis, skew
from collections import defaultdict
import statistics
from itertools import chain
from scipy.interpolate import interp1d
from collections import defaultdict
from nested_dict import nested_dict
import DES_weather_analysis
from DES_weather_analysis import clustring_kmean_forced, clustring_kmediod_PCA_operation, EPW_to_csv,solar_irradiance,solar_position
from DES_weather_analysis.solar_irradiance import aoi, get_total_irradiance
from DES_weather_analysis.solar_position import get_solarposition
JtokWh = 2.7778e-7
def kmedoid_clusters(path_test,mode):
editable_data_path =os.path.join(path_test, 'editable_values.csv')
editable_data = pd.read_csv(editable_data_path, header=None, index_col=0, squeeze=True).to_dict()[1]
scenario_reduction_path= os.path.join(path_test,'ScenarioReduction')
scenarios_path = os.path.join(path_test,'ScenarioGeneration')
if not os.path.exists(scenario_reduction_path):
os.makedirs(scenario_reduction_path)
representative_days_path = scenario_reduction_path
num_scenario = 0
num_scenarios = int(editable_data['num_scenarios'])
city=editable_data['city']
lat = float(editable_data['Latitude'])
lon = float(editable_data['Longitude'])
altitude = float(editable_data['Altitude']) #SLC altitude m
surf_tilt = float(editable_data['solar_tilt']) #panels tilt degree
surf_azimuth = float(editable_data['solar_azimuth']) #panels azimuth degree
idf_names= []
thermal_eff_dict= {}
weight_factor={}
for i in range(int(editable_data['number_buildings'])):
if 'building_name_'+str(i+1) in editable_data.keys():
building_name = editable_data['building_name_'+str(i+1)]
idf_names.append(building_name)
thermal_eff_dict[building_name]=float(editable_data['thermal_eff_'+str(i+1)])
weight_factor[building_name]=float(editable_data['WF_'+str(i+1)])
#idf_names=idf_names[1:2]
start_year = int(editable_data['starting_year'])
end_year = int(editable_data['ending_year'])
epw_names = []
for i_temp in range(num_scenarios):
for i_solar in range(num_scenarios):
epw_names.append('T_'+str(i_temp)+'_S_'+str(i_solar))
demand_directory = os.path.join(path_test, 'IDFBuildingsFiles')
output_directory = os.path.join(path_test, 'IDFBuildingsFiles')
# epw main files
dict_EPWs = {}
list_years = []
list_tmys =[]
list_fmys = []
for year in reversed(range(start_year,end_year+1)):
weather_data = city+'_'+str(lat)+'_'+str(lon)+'_psm3_60_'+str(year)
list_years.append(weather_data)
for i in range(5):
if 'TMY'+str(i+1)+'_name' in editable_data.keys():
TMY_name = editable_data['TMY'+str(i+1)+'_name']
list_tmys.append(TMY_name)
if 'FMY'+str(i+1)+'_name' in editable_data.keys():
FMY_name = editable_data['FMY'+str(i+1)+'_name']
list_fmys.append(FMY_name)
dict_EPWs['AMYs']=list_years
dict_EPWs['FMYs']=list_fmys
dict_EPWs['TMYs']=list_tmys
global k
def scenario_reduction_per_year(scenario_genrated,name,weather_data):
global k
days= 365
features_scenarios = defaultdict(list)
features_scenarios_list = []
features_probability_list = []
features_scenarios_nested = nested_dict()
scenario_probability = [1]*365
k = 0
#print(scenario_genrated)
for i in range(days):
data_new = scenario_genrated[i*24:(i+1)*24]
#print(data_new.keys())
data_1 = data_new['Total Electricity']
data_2 = data_new['Total Heating']
#print(data_1)
#print(name,i,k,data_1[15],data_2[15])
daily_list =list(chain(data_1.astype('float', copy=False),data_2.astype('float', copy=False)))
features_scenarios[k*days+i] = daily_list
features_scenarios_nested[i] = features_scenarios[k*days+i]
features_scenarios_list.append(features_scenarios[k*days+i])
features_probability_list.append(scenario_probability[i])
k = k+1
A = np.asarray(features_scenarios_list)
#Convert the dictionary of features to Series
standardization_data = StandardScaler()
A_scaled = standardization_data.fit_transform(A)
inertia_list = []
search_optimum_cluster = editable_data['Search optimum clusters'] # if I want to search for the optimum number of clusters: 1 is yes, 0 is no
cluster_range = range(2,30,1)
if search_optimum_cluster=='yes' and name== 'total_'+dict_EPWs['TMYs'][-1]+'_':
print('Defining the optimum number of clusters: ')
SMALL_SIZE = 14
MEDIUM_SIZE = 18
BIGGER_SIZE = 24
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (15,10)
fig, ax = plt.subplots(figsize=(15, 10))
for cluster_numbers in cluster_range:
kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=0).fit(A_scaled)
inertia_list.append(kmedoids.inertia_)
plt.scatter(cluster_numbers,kmedoids.inertia_)
print('Cluster number:', cluster_numbers, ' Inertia of the cluster:', int(kmedoids.inertia_))
ax.set_xlabel('Number of clusters',fontsize=BIGGER_SIZE)
ax.set_ylabel('Inertia',fontsize=BIGGER_SIZE)
#ax.set_title('The user should use "Elbow method" to select the number of optimum clusters',fontsize=BIGGER_SIZE)
ax.plot(list(cluster_range),inertia_list)
ax.set_xticks(np.arange(2,30,1))
plt.savefig(os.path.join(path_test, 'Inertia vs Clusters.png'),dpi=300,facecolor='w')
plt.close()
print('"Inertia vs Clusters" figure is saved in the directory folder')
print('You can use the figure to select the optimum number of clusters' )
print('You should enter the new optimum number of clusters in EditableFile.csv file and re-run this part')
cluster_numbers= int(editable_data['Cluster numbers'])
kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=4).fit(A_scaled)
#kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=4).fit(scores_pca)
label = kmedoids.fit_predict(A_scaled)
#filter rows of original data
probability_label = defaultdict(list)
index_label = defaultdict(list)
index_label_all = []
filtered_label={}
for i in range(cluster_numbers):
filtered_label[i] = A_scaled[label == i]
index_cluster=np.where(label==i)
if len(filtered_label[i])!=0:
index_cluster = index_cluster[0]
for j in index_cluster:
probability_label[i].append(features_probability_list[j])
index_label[i].append(j)
index_label_all.append(j)
else:
probability_label[i].append(0)
sum_probability = []
for key in probability_label.keys():
sum_probability.append(sum(probability_label[key]))
#print(kmedoids.predict([[0,0,0], [4,4,4]]))
#print(kmedoids.cluster_centers_,kmedoids.cluster_centers_[0],len(kmedoids.cluster_centers_))
A_scaled_list={}
clusters={}
clusters_list = []
label_list = []
data_labels={}
data_all_labels = defaultdict(list)
for center in range(len(kmedoids.cluster_centers_)):
clusters['cluster centers '+str(center)]= kmedoids.cluster_centers_[center]
clusters_list.append(kmedoids.cluster_centers_[center].tolist())
for scenario in range(len(A_scaled)):
data_all_labels[kmedoids.labels_[scenario]].append(standardization_data.inverse_transform(A_scaled[scenario].reshape(1,-1)))
#print(data_all_labels)
A_scaled_list[scenario]=A_scaled[scenario].tolist()
A_scaled_list[scenario].insert(0,kmedoids.labels_[scenario])
data_labels['labels '+str(scenario)]= A_scaled_list[scenario]
label_list.append(A_scaled[scenario].tolist())
df_clusters= pd.DataFrame(clusters)
df_labels = pd.DataFrame(data_labels)
df_clusters.to_csv(os.path.join(representative_days_path , name+ 'cluster_centers_C_'+str(len(kmedoids.cluster_centers_))+'_L_'+str(len(kmedoids.labels_))+'.csv'), index=False)
df_labels.to_csv(os.path.join(representative_days_path , name + 'labels_C_'+str(len(kmedoids.cluster_centers_))+'_L_'+str(len(kmedoids.labels_))+'.csv'), index=False)
#Reversing PCA using two methods:
#Reversing the cluster centers using method 1 (their results are the same)
Scenario_generated_new = standardization_data.inverse_transform(kmedoids.cluster_centers_)
#print('15 representative days',clusters_reverse[0][0],Scenario_generated_new[0][0],standardization_data.mean_[0],standardization_data.var_[0])
representative_day_all = {}
total_labels = []
represent_gaps = {}
scenario_data = {}
for key in filtered_label.keys():
total_labels.append(len(filtered_label[key]))
#print(len(probability_label[0])) 1990
#print(len(filtered_label[0])) 1990
for representative_day in range(len(Scenario_generated_new)):
represent_gaps = {}
scenario_data = {}
for i in range(48):
if Scenario_generated_new[representative_day][i]<0:
Scenario_generated_new[representative_day][i] = 0
for k in range(2): # 2 uncertain inputs
scenario_data[k] = Scenario_generated_new[representative_day][24*k:24*(k+1)].copy()
#min_non_z = np.min(np.nonzero(scenario_data[k]))
#max_non_z = np.max(np.nonzero(scenario_data[k]))
#represent_gaps[k]= [i for i, x in enumerate(scenario_data[k][min_non_z:max_non_z+1]) if x == 0]
#ranges = sum((list(t) for t in zip(represent_gaps[k], represent_gaps[k][1:]) if t[0]+1 != t[1]), [])
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#print('Present gaps are: ', representative_day,k, 'gaps', ', '.join([str(n) + '-' + str(next(iranges)) for n in iranges]))
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#for n in iranges:
# next_n = next(iranges)
# if (next_n-n) == 0: #for data gaps of 1 hour, get the average value
# scenario_data[k][n+min_non_z] = (scenario_data[k][min_non_z+n+1]+scenario_data[k][min_non_z+n-1])/2
# elif (next_n-n) > 0 and (next_n-n) <= 6: #for data gaps of 1 hour to 4 hr, use interpolation and extrapolation
# f_interpol_short= interp1d([n-1,next_n+1], [scenario_data[k][min_non_z+n-1],scenario_data[k][min_non_z+next_n+1]])
# for m in range(n,next_n+1):
# scenario_data[k][m+min_non_z] = f_interpol_short(m)
data_represent_days_modified={'Electricity total (kWh)': scenario_data[0],
'Heating (kWh)': scenario_data[1],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
#print(np.mean(Scenario_generated_new[representative_day][0:24]))
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name+'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
max_heating_scenarios_nested = nested_dict()
max_electricity_scenarios_nested = nested_dict()
total_heating_scenarios = []
total_electricity_scenarios = []
max_electricity_scenarios_nested_list = defaultdict(list)
max_heating_scenarios_nested_list = defaultdict(list)
accuracy_design_day = 0.99
design_day_heating = []
design_day_electricity = []
representative_day_max = {}
electricity_design_day = {}
heating_design_day = {}
for day in range(365):
for i in range(24):
k_elect=0
list_k_electricity = []
k_heat=0
list_k_heating = []
for represent in range(cluster_numbers):
representative_day_max[represent] = pd.read_csv(os.path.join(representative_days_path ,name+'Represent_days_modified_'+str(represent)+'.csv'))
electricity_demand = representative_day_max[represent]['Electricity total (kWh)'] #kWh
heating_demand = representative_day_max[represent]['Heating (kWh)'] #kWh
if features_scenarios_nested[day][0:24][i]>electricity_demand[i]:
k_elect=1
list_k_electricity.append(k_elect)
k_elect=0
if features_scenarios_nested[day][24:48][i]>heating_demand[i]:
k_heat=1
list_k_heating.append(k_heat)
k_heat=0
if sum(list_k_electricity)==cluster_numbers: #This hour does not meet by any of the representative days
max_electricity_scenarios_nested_list[i].append(features_scenarios_nested[day][0:24][i])
total_electricity_scenarios.append(features_scenarios_nested[day][0:24][i])
if sum(list_k_heating)==cluster_numbers: #This hour does not meet by any of the representative days
max_heating_scenarios_nested_list[i].append(features_scenarios_nested[day][24:48][i])
total_heating_scenarios.append(features_scenarios_nested[day][24:48][i])
total_electricity_scenarios.sort(reverse=True)
total_heating_scenarios.sort(reverse=True)
max_electricity_hour = total_electricity_scenarios[35]
max_heating_hour = total_heating_scenarios[2]
#print(max_heating_hour,len(total_heating_scenarios),np.min(total_heating_scenarios),np.max(total_heating_scenarios))
design_day_heating = []
design_day_electricity = []
heating_dd = []
for i in range(24):
if len(max_electricity_scenarios_nested_list[i])==1:
design_day_electricity.append(max_electricity_scenarios_nested_list[i][0])
else:
try:
design_day_electricity.append(np.max([j for j in max_electricity_scenarios_nested_list[i] if j<max_electricity_hour]))
except:
design_day_electricity.append(0)
#print(i,len(max_heating_scenarios_nested_list[i]),max_heating_scenarios_nested_list[i])
if len(max_heating_scenarios_nested_list[i])==1:
heating_dd.append(max_heating_scenarios_nested_list[i][0])
design_day_heating.append(np.max(heating_dd))
else:
try:
heating_dd = [j for j in max_heating_scenarios_nested_list[i] if j<max_heating_hour]
design_day_heating.append(np.max(heating_dd))
except:
design_day_heating.append(0)
for i in range(24):
if design_day_electricity[i]==0:
if i==0:
design_day_electricity[i] = design_day_electricity[i+1]
elif i==23:
design_day_electricity[i] = design_day_electricity[i-1]
else:
design_day_electricity[i] = (design_day_electricity[i-1]+design_day_electricity[i+1])/2
if design_day_heating[i]==0:
if i==0:
design_day_heating[i] = design_day_heating[i+1]
elif i==23:
design_day_heating[i] = design_day_heating[i-1]
else:
design_day_heating[i] = (design_day_heating[i-1]+design_day_heating[i+1])/2
representative_day_max = {}
electricity_demand_total = defaultdict(list)
heating_demand_total = defaultdict(list)
heating_demand_max = {}
electricity_demand_max = {}
for represent in range(cluster_numbers):
representative_day_max[represent] = pd.read_csv(os.path.join(representative_days_path ,name+'Represent_days_modified_'+str(represent)+'.csv'))
electricity_demand = representative_day_max[represent]['Electricity total (kWh)'] #kWh
heating_demand = representative_day_max[represent]['Heating (kWh)'] #kWh
#hours_representative_day= round(sum_probability[representative_day]/sum(sum_probability),4)*8760
heating_demand_max[represent]= np.mean(heating_demand)
electricity_demand_max[represent]= np.mean(electricity_demand)
high_electricity_index = []
high_heating_index = []
high_electricity_value = []
high_heating_value = []
key_max_electricity=max(electricity_demand_max, key=electricity_demand_max.get)
key_max_heating=max(heating_demand_max, key=heating_demand_max.get)
for key, value in max_electricity_scenarios_nested.items():
for inner_key, inner_value in max_electricity_scenarios_nested[key].items():
if inner_value>electricity_demand_max[key_max_electricity]:
high_electricity_index.append(scenario_number[key]*365+inner_key)
high_electricity_value.append(inner_value)
for key, value in max_heating_scenarios_nested.items():
for inner_key, inner_value in max_heating_scenarios_nested[key].items():
if inner_value>heating_demand_max[key_max_heating]:
high_heating_index.append(scenario_number[key]*365+inner_key)
high_heating_value.append(inner_value)
sum_probability.append(0.5*len(total_electricity_scenarios)/len(index_label_all)*365)
sum_probability.append(len(total_heating_scenarios)/len(index_label_all)*365)
filtered_label[cluster_numbers]=len(total_electricity_scenarios)
filtered_label[cluster_numbers+1]=len(total_heating_scenarios)
representative_day = cluster_numbers
data_represent_days_modified={'Electricity total (kWh)': design_day_electricity,
'Heating (kWh)': representative_day_max[key_max_electricity]['Heating (kWh)'],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name+'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
representative_day = cluster_numbers+1
data_represent_days_modified={'Electricity total (kWh)': representative_day_max[key_max_heating]['Electricity total (kWh)'],
'Heating (kWh)': design_day_heating,
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name+'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
for representative_day in range(len(Scenario_generated_new)):
represent_gaps = {}
scenario_data = {}
for i in range(48): #24*5=120 features in each day
if Scenario_generated_new[representative_day][i]<0:
Scenario_generated_new[representative_day][i] = 0
for k in range(2): # 2 uncertain inputs
scenario_data[k] = Scenario_generated_new[representative_day][24*k:24*(k+1)].copy()
#min_non_z = np.min(np.nonzero(scenario_data[k]))
#zmax_non_z = np.max(np.nonzero(scenario_data[k]))
#represent_gaps[k]= [i for i, x in enumerate(scenario_data[k][min_non_z:max_non_z+1]) if x == 0]
#ranges = sum((list(t) for t in zip(represent_gaps[k], represent_gaps[k][1:]) if t[0]+1 != t[1]), [])
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#print('Present gaps are: ', representative_day,k, 'gaps', ', '.join([str(n) + '-' + str(next(iranges)) for n in iranges]))
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#for n in iranges:
# next_n = next(iranges)
# if (next_n-n) == 0: #for data gaps of 1 hour, get the average value
# scenario_data[k][n+min_non_z] = (scenario_data[k][min_non_z+n+1]+scenario_data[k][min_non_z+n-1])/2
# elif (next_n-n) > 0 and (next_n-n) <= 6: #for data gaps of 1 hour to 4 hr, use interpolation and extrapolation
# f_interpol_short= interp1d([n-1,next_n+1], [scenario_data[k][min_non_z+n-1],scenario_data[k][min_non_z+next_n+1]])
# for m in range(n,next_n+1):
# scenario_data[k][m+min_non_z] = f_interpol_short(m)
data_represent_days_modified={'Electricity total (kWh)': scenario_data[0],
'Heating (kWh)': scenario_data[1],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
#print(np.mean(Scenario_generated_new[representative_day][0:24]))
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name + 'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
all_representative_days = clustring_kmean_forced.kmedoid_clusters(path_test,scenario_genrated,name)[2]
represent_day = defaultdict(list)
k=0
days= 365
for represent in range(int(editable_data['Cluster numbers'])+2):
for day in range(days):
data = scenario_genrated[day*24:(day+1)*24]
data_1 = data['Total Electricity']
data_2 = data['Total Heating']
#Total electricity and heating
daily_list =list(chain(data_1.astype('float', copy=False),data_2.astype('float', copy=False)))
#if round(all_representative_days[represent]['Electricity total (kWh)'][10],0)==round(daily_list[10],0):
# print('elect',represent, day, round(all_representative_days[represent]['Electricity total (kWh)'][10],0),round(daily_list[10],0))
#if round(all_representative_days[represent]['Heating (kWh)'][6],0)==round(daily_list[30],0):
# print('heat',represent, day, round(all_representative_days[represent]['Heating (kWh)'][6],0),round(daily_list[30],0))
if round(all_representative_days[represent]['Electricity total (kWh)'][10],0)==round(daily_list[10],0) and round(all_representative_days[represent]['Heating (kWh)'][6],0)==round(daily_list[30],0) :
represent_day[represent] = day
data_temp = []
data_dni = []
data_ghi = []
data_dhi = []
data_wind_speed = []
poa_components_vector = []
poa_global = []
hour = 0
for index_in_year in range(day*24,(day+1)*24):
data_temp.append(weather_data['temp_air'].tolist()[index_in_year])
data_dni.append(weather_data['dni'].tolist()[index_in_year])
data_ghi.append(weather_data['ghi'].tolist()[index_in_year])
data_dhi.append(weather_data['dhi'].tolist()[index_in_year])
data_wind_speed.append(weather_data['wind_speed'].tolist()[index_in_year])
dti = datetime.datetime(weather_data['year'].tolist()[index_in_year], weather_data['month'].tolist()[index_in_year], weather_data['day'].tolist()[index_in_year],hour)
solar_position = get_solarposition(dti,lat, lon, altitude, pressure=None, method='nrel_numpy', temperature=12)
solar_zenith = solar_position['zenith']
solar_azimuth = solar_position['azimuth']
poa_components_vector.append(get_total_irradiance(surf_tilt, surf_azimuth,
solar_zenith[0], solar_azimuth[0],
float(weather_data['dni'].tolist()[index_in_year]), float(weather_data['ghi'].tolist()[index_in_year]), float(weather_data['dhi'].tolist()[index_in_year]), dni_extra=None, airmass=None,
albedo=.25, surface_type=None,
model='isotropic',
model_perez='allsitescomposite1990'))
poa_global.append(poa_components_vector[hour]['poa_global'])
hour +=1
for represent in range(int(editable_data['Cluster numbers'])+2):
all_representative_days[represent]['temp_air']=data_temp
all_representative_days[represent]['dni']=data_dni
all_representative_days[represent]['ghi']=data_ghi
all_representative_days[represent]['dhi']=data_dhi
all_representative_days[represent]['wind_speed']=data_wind_speed
all_representative_days[represent]['gti']=poa_global
all_representative_days[represent].to_csv(os.path.join(representative_days_path,name + 'Represent_days_modified_'+str(represent)+ '.csv'), index=False)
break
return data_all_labels, represent_day
cluster_numbers= int(editable_data['Cluster numbers'])+2
temps= []
gtis=[]
for scenario in range(len(epw_names)):
#output_prefix = building_type+'_'+epw_names[scenario]+'_'
weather_path = os.path.join(scenarios_path,epw_names[scenario]+'.csv')
data = pd.read_csv(weather_path)
if scenario<10:
gtis.append(round(np.mean(data['GTI']),1))
#print(epw_names[scenario],'GTI',np.mean(data['GTI']))
if scenario%10==0:
#print(epw_names[scenario],'Temp',np.mean(data['Temperature']))
temps.append(round(np.mean(data['Temperature']),1))
print('gti', gtis)
print('temps',temps)
scenario_generated_main = defaultdict(list)
elect_buildings_main = defaultdict(list)
gas_buildings_main = defaultdict(list)
elect_annual_main = defaultdict(list)
gas_annual_main = defaultdict(list)
for building_type in idf_names:
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = building_type+'_'+epw_file_name+'_mtr.csv'
demand_data_path = os.path.join(demand_directory, output_prefix)
data = pd.read_csv(demand_data_path)
elect_data = ((data['Electricity:Facility [J](Hourly)']-data['Heating:Electricity [J](Hourly)'])*JtokWh)
heat_data = (data['Gas:Facility [J](Hourly)']*thermal_eff_dict[building_type]+data['Heating:Electricity [J](Hourly)'])*JtokWh
#print(output_prefix,elect_data,heat_data )
#data['Total Electricity']=elect_data
#data['Total Heating']=heat_data
scenario_generated_main[building_type].append(data)
elect_buildings_main[building_type].append(elect_data)
elect_annual_main[building_type].append(sum(elect_data))
gas_buildings_main[building_type].append(heat_data)
gas_annual_main[building_type].append(sum(heat_data))
j=0
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
if key =='AMYs':
weather_path = os.path.join(scenarios_path,epw_file_name+'.epw')
data, meta = EPW_to_csv.read_epw(weather_path)
elif key =='FMYs':
weather_path = os.path.join(os.path.join(os.path.join(path_test,'Weather files'),key),epw_file_name+'.epw')
data, meta = EPW_to_csv.read_epw(weather_path,FMYs='yes')
else:
weather_path = os.path.join(os.path.join(os.path.join(path_test,'Weather files'),key),epw_file_name+'.epw')
data, meta = EPW_to_csv.read_epw(weather_path)
data.to_csv(os.path.join(scenarios_path,epw_file_name+'.csv'), index = False, header=True)
total_electricity_buildings = []
total_heating_buildings = []
for building_type in idf_names:
if mode=='seperate':
output_prefix = building_type+'_'+epw_file_name+'_'
scenario_generated_main[building_type][j]['Total Electricity']=elect_buildings_main[building_type][j]*weight_factor[building_type]
scenario_generated_main[building_type][j]['Total Heating']=gas_buildings_main[building_type][j]*weight_factor[building_type]
scenario_reduction_per_year(scenario_generated_main[building_type][j],output_prefix,data)
elif mode=='total':
#print(building_type,'elect',elect_buildings_main[building_type][j]*weight_factor[building_type])
#print(building_type,'heat',gas_buildings_main[building_type][j]*weight_factor[building_type])
total_electricity_buildings.append(elect_buildings_main[building_type][j]*weight_factor[building_type])
total_heating_buildings.append(gas_buildings_main[building_type][j]*weight_factor[building_type])
if mode=='total':
output_prefix = 'total_'+epw_file_name+'_'
scenario_generated_main[building_type][j]['Total Electricity']=sum(total_electricity_buildings)
scenario_generated_main[building_type][j]['Total Heating']=sum(total_heating_buildings)
#print('total',j,output_prefix,sum(total_electricity_buildings),sum(total_heating_buildings))
#print(total_electricity_buildings[0][15],total_electricity_buildings[1][15],total_electricity_buildings[2][15],sum(total_electricity_buildings)[15],len(sum(total_electricity_buildings)))
#print(len(scenario_generated_main[building_type][j]))
scenario_reduction_per_year(scenario_generated_main[building_type][j],output_prefix,data)
j = j+1
scenario_probability = defaultdict(list)
scenario_generated = defaultdict(list)
elect_buildings = defaultdict(list)
gas_buildings = defaultdict(list)
elect_annual= defaultdict(list)
gas_annual = defaultdict(list)
for building_type in idf_names:
for scenario in range(len(epw_names)):
output_prefix = building_type+'_'+epw_names[scenario]+'_mtr.csv'
demand_data_path = os.path.join(demand_directory, output_prefix)
data = pd.read_csv(demand_data_path)
elect_data = (data['Electricity:Facility [J](Hourly)']-data['Heating:Electricity [J](Hourly)'])*JtokWh
heat_data = (data['Gas:Facility [J](Hourly)']*thermal_eff_dict[building_type]+data['Heating:Electricity [J](Hourly)'])*JtokWh
#data['Total Electricity']=elect_data
#data['Total Heating']=heat_data
scenario_generated[building_type].append(data)
scenario_generated[building_type].append(data)
elect_buildings[building_type].append(elect_data)
elect_annual[building_type].append(sum(elect_data))
gas_buildings[building_type].append(heat_data)
gas_annual[building_type].append(sum(heat_data))
#print(scenario,output_prefix,gas_buildings[building_type][scenario][0],elect_buildings[building_type][scenario][0])
for scenario in range(len(epw_names)):
output_prefix = building_type+'_'+epw_names[scenario]+'_'
weather_path = os.path.join(scenarios_path,epw_names[scenario]+'.epw')
data, meta = EPW_to_csv.read_epw(weather_path)
data.to_csv(os.path.join(scenarios_path,epw_file_name+'.csv'), index = False, header=True)
total_electricity_buildings = []
total_heating_buildings = []
for building_type in idf_names:
if mode=='seperate':
output_prefix = building_type+'_'+epw_names[scenario]+'_'
scenario_generated[building_type][scenario]['Total Electricity']=elect_buildings[building_type][scenario]*weight_factor[building_type]
scenario_generated[building_type][scenario]['Total Heating']=gas_buildings[building_type][scenario]*weight_factor[building_type]
scenario_reduction_per_year(scenario_generated[building_type][scenario],output_prefix,data)
elif mode=='total':
total_electricity_buildings.append(elect_buildings[building_type][scenario]*weight_factor[building_type])
total_heating_buildings.append(gas_buildings[building_type][scenario]*weight_factor[building_type])
if mode=='total':
output_prefix = 'total_'+epw_names[scenario]+'_'
scenario_generated[building_type][scenario]['Total Electricity']=sum(total_electricity_buildings)
scenario_generated[building_type][scenario]['Total Heating']=sum(total_heating_buildings)
#print(scenario_generated[building_type][scenario].keys())
scenario_reduction_per_year(scenario_generated[building_type][scenario],output_prefix,data)
import numpy as np
import pandas as pd
import os
import sys
import matplotlib.pyplot as plt
import matplotlib
import datetime
import sklearn.datasets, sklearn.decomposition
from sklearn.cluster import KMeans
from sklearn_extra.cluster import KMedoids
from sklearn.preprocessing import StandardScaler
import sklearn_extra
from scipy import stats
from scipy.stats import kurtosis, skew
from collections import defaultdict
import statistics
from itertools import chain
from scipy.interpolate import interp1d
from collections import defaultdict
from nested_dict import nested_dict
import DES_weather_analysis
from DES_weather_analysis import clustring_kmean_forced, clustring_kmediod_PCA_operation, EPW_to_csv,solar_irradiance,solar_position
from DES_weather_analysis.solar_irradiance import aoi, get_total_irradiance
from DES_weather_analysis.solar_position import get_solarposition
JtokWh = 2.7778e-7
def kmedoid_clusters(path_test,mode):
editable_data_path =os.path.join(path_test, 'editable_values.csv')
editable_data = pd.read_csv(editable_data_path, header=None, index_col=0, squeeze=True).to_dict()[1]
scenario_reduction_path= os.path.join(path_test,'ScenarioReduction')
scenarios_path = os.path.join(path_test,'ScenarioGeneration')
if not os.path.exists(scenario_reduction_path):
os.makedirs(scenario_reduction_path)
representative_days_path = scenario_reduction_path
num_scenario = 0
num_scenarios = int(editable_data['num_scenarios'])
city=editable_data['city']
lat = float(editable_data['Latitude'])
lon = float(editable_data['Longitude'])
altitude = float(editable_data['Altitude']) #SLC altitude m
surf_tilt = float(editable_data['solar_tilt']) #panels tilt degree
surf_azimuth = float(editable_data['solar_azimuth']) #panels azimuth degree
idf_names= []
thermal_eff_dict= {}
weight_factor={}
for i in range(int(editable_data['number_buildings'])):
if 'building_name_'+str(i+1) in editable_data.keys():
building_name = editable_data['building_name_'+str(i+1)]
idf_names.append(building_name)
thermal_eff_dict[building_name]=float(editable_data['thermal_eff_'+str(i+1)])
weight_factor[building_name]=float(editable_data['WF_'+str(i+1)])
#idf_names=idf_names[1:2]
start_year = int(editable_data['starting_year'])
end_year = int(editable_data['ending_year'])
epw_names = []
for i_temp in range(num_scenarios):
for i_solar in range(num_scenarios):
epw_names.append('T_'+str(i_temp)+'_S_'+str(i_solar))
demand_directory = os.path.join(path_test, 'IDFBuildingsFiles')
output_directory = os.path.join(path_test, 'IDFBuildingsFiles')
# epw main files
dict_EPWs = {}
list_years = []
list_tmys =[]
list_fmys = []
for year in reversed(range(start_year,end_year+1)):
weather_data = city+'_'+str(lat)+'_'+str(lon)+'_psm3_60_'+str(year)
list_years.append(weather_data)
for i in range(5):
if 'TMY'+str(i+1)+'_name' in editable_data.keys():
TMY_name = editable_data['TMY'+str(i+1)+'_name']
list_tmys.append(TMY_name)
if 'FMY'+str(i+1)+'_name' in editable_data.keys():
FMY_name = editable_data['FMY'+str(i+1)+'_name']
list_fmys.append(FMY_name)
dict_EPWs['AMYs']=list_years
dict_EPWs['FMYs']=list_fmys
dict_EPWs['TMYs']=list_tmys
global k
def scenario_reduction_per_year(scenario_genrated,name,weather_data):
global k
days= 365
features_scenarios = defaultdict(list)
features_scenarios_list = []
features_probability_list = []
features_scenarios_nested = nested_dict()
scenario_probability = [1]*365
k = 0
#print(scenario_genrated)
for i in range(days):
data_new = scenario_genrated[i*24:(i+1)*24]
#print(data_new.keys())
data_1 = data_new['Total Electricity']
data_2 = data_new['Total Heating']
#print(data_1)
#print(name,i,k,data_1[15],data_2[15])
daily_list =list(chain(data_1.astype('float', copy=False),data_2.astype('float', copy=False)))
features_scenarios[k*days+i] = daily_list
features_scenarios_nested[i] = features_scenarios[k*days+i]
features_scenarios_list.append(features_scenarios[k*days+i])
features_probability_list.append(scenario_probability[i])
k = k+1
A = np.asarray(features_scenarios_list)
#Convert the dictionary of features to Series
standardization_data = StandardScaler()
A_scaled = standardization_data.fit_transform(A)
inertia_list = []
search_optimum_cluster = editable_data['Search optimum clusters'] # if I want to search for the optimum number of clusters: 1 is yes, 0 is no
cluster_range = range(2,30,1)
if search_optimum_cluster=='yes' and name== 'total_'+dict_EPWs['TMYs'][-1]+'_':
print('Defining the optimum number of clusters: ')
SMALL_SIZE = 14
MEDIUM_SIZE = 18
BIGGER_SIZE = 24
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.grid'] = False
plt.rcParams['axes.edgecolor'] = 'black'
cmap = plt.cm.RdYlGn
plt.rcParams["figure.figsize"] = (15,10)
fig, ax = plt.subplots(figsize=(15, 10))
for cluster_numbers in cluster_range:
kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=0).fit(A_scaled)
inertia_list.append(kmedoids.inertia_)
plt.scatter(cluster_numbers,kmedoids.inertia_)
print('Cluster number:', cluster_numbers, ' Inertia of the cluster:', int(kmedoids.inertia_))
ax.set_xlabel('Number of clusters',fontsize=BIGGER_SIZE)
ax.set_ylabel('Inertia',fontsize=BIGGER_SIZE)
#ax.set_title('The user should use "Elbow method" to select the number of optimum clusters',fontsize=BIGGER_SIZE)
ax.plot(list(cluster_range),inertia_list)
ax.set_xticks(np.arange(2,30,1))
plt.savefig(os.path.join(path_test, 'Inertia vs Clusters.png'),dpi=300,facecolor='w')
plt.close()
print('"Inertia vs Clusters" figure is saved in the directory folder')
print('You can use the figure to select the optimum number of clusters' )
print('You should enter the new optimum number of clusters in EditableFile.csv file and re-run this part')
cluster_numbers= int(editable_data['Cluster numbers'])
kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=4).fit(A_scaled)
#kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=4).fit(scores_pca)
label = kmedoids.fit_predict(A_scaled)
#filter rows of original data
probability_label = defaultdict(list)
index_label = defaultdict(list)
index_label_all = []
filtered_label={}
for i in range(cluster_numbers):
filtered_label[i] = A_scaled[label == i]
index_cluster=np.where(label==i)
if len(filtered_label[i])!=0:
index_cluster = index_cluster[0]
for j in index_cluster:
probability_label[i].append(features_probability_list[j])
index_label[i].append(j)
index_label_all.append(j)
else:
probability_label[i].append(0)
sum_probability = []
for key in probability_label.keys():
sum_probability.append(sum(probability_label[key]))
#print(kmedoids.predict([[0,0,0], [4,4,4]]))
#print(kmedoids.cluster_centers_,kmedoids.cluster_centers_[0],len(kmedoids.cluster_centers_))
A_scaled_list={}
clusters={}
clusters_list = []
label_list = []
data_labels={}
data_all_labels = defaultdict(list)
for center in range(len(kmedoids.cluster_centers_)):
clusters['cluster centers '+str(center)]= kmedoids.cluster_centers_[center]
clusters_list.append(kmedoids.cluster_centers_[center].tolist())
for scenario in range(len(A_scaled)):
data_all_labels[kmedoids.labels_[scenario]].append(standardization_data.inverse_transform(A_scaled[scenario].reshape(1,-1)))
#print(data_all_labels)
A_scaled_list[scenario]=A_scaled[scenario].tolist()
A_scaled_list[scenario].insert(0,kmedoids.labels_[scenario])
data_labels['labels '+str(scenario)]= A_scaled_list[scenario]
label_list.append(A_scaled[scenario].tolist())
df_clusters= pd.DataFrame(clusters)
df_labels = pd.DataFrame(data_labels)
df_clusters.to_csv(os.path.join(representative_days_path , name+ 'cluster_centers_C_'+str(len(kmedoids.cluster_centers_))+'_L_'+str(len(kmedoids.labels_))+'.csv'), index=False)
df_labels.to_csv(os.path.join(representative_days_path , name + 'labels_C_'+str(len(kmedoids.cluster_centers_))+'_L_'+str(len(kmedoids.labels_))+'.csv'), index=False)
#Reversing PCA using two methods:
#Reversing the cluster centers using method 1 (their results are the same)
Scenario_generated_new = standardization_data.inverse_transform(kmedoids.cluster_centers_)
#print('15 representative days',clusters_reverse[0][0],Scenario_generated_new[0][0],standardization_data.mean_[0],standardization_data.var_[0])
representative_day_all = {}
total_labels = []
represent_gaps = {}
scenario_data = {}
for key in filtered_label.keys():
total_labels.append(len(filtered_label[key]))
#print(len(probability_label[0])) 1990
#print(len(filtered_label[0])) 1990
for representative_day in range(len(Scenario_generated_new)):
represent_gaps = {}
scenario_data = {}
for i in range(48):
if Scenario_generated_new[representative_day][i]<0:
Scenario_generated_new[representative_day][i] = 0
for k in range(2): # 2 uncertain inputs
scenario_data[k] = Scenario_generated_new[representative_day][24*k:24*(k+1)].copy()
#min_non_z = np.min(np.nonzero(scenario_data[k]))
#max_non_z = np.max(np.nonzero(scenario_data[k]))
#represent_gaps[k]= [i for i, x in enumerate(scenario_data[k][min_non_z:max_non_z+1]) if x == 0]
#ranges = sum((list(t) for t in zip(represent_gaps[k], represent_gaps[k][1:]) if t[0]+1 != t[1]), [])
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#print('Present gaps are: ', representative_day,k, 'gaps', ', '.join([str(n) + '-' + str(next(iranges)) for n in iranges]))
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#for n in iranges:
# next_n = next(iranges)
# if (next_n-n) == 0: #for data gaps of 1 hour, get the average value
# scenario_data[k][n+min_non_z] = (scenario_data[k][min_non_z+n+1]+scenario_data[k][min_non_z+n-1])/2
# elif (next_n-n) > 0 and (next_n-n) <= 6: #for data gaps of 1 hour to 4 hr, use interpolation and extrapolation
# f_interpol_short= interp1d([n-1,next_n+1], [scenario_data[k][min_non_z+n-1],scenario_data[k][min_non_z+next_n+1]])
# for m in range(n,next_n+1):
# scenario_data[k][m+min_non_z] = f_interpol_short(m)
data_represent_days_modified={'Electricity total (kWh)': scenario_data[0],
'Heating (kWh)': scenario_data[1],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
#print(np.mean(Scenario_generated_new[representative_day][0:24]))
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name+'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
max_heating_scenarios_nested = nested_dict()
max_electricity_scenarios_nested = nested_dict()
total_heating_scenarios = []
total_electricity_scenarios = []
max_electricity_scenarios_nested_list = defaultdict(list)
max_heating_scenarios_nested_list = defaultdict(list)
accuracy_design_day = 0.99
design_day_heating = []
design_day_electricity = []
representative_day_max = {}
electricity_design_day = {}
heating_design_day = {}
for day in range(365):
for i in range(24):
k_elect=0
list_k_electricity = []
k_heat=0
list_k_heating = []
for represent in range(cluster_numbers):
representative_day_max[represent] = pd.read_csv(os.path.join(representative_days_path ,name+'Represent_days_modified_'+str(represent)+'.csv'))
electricity_demand = representative_day_max[represent]['Electricity total (kWh)'] #kWh
heating_demand = representative_day_max[represent]['Heating (kWh)'] #kWh
if features_scenarios_nested[day][0:24][i]>electricity_demand[i]:
k_elect=1
list_k_electricity.append(k_elect)
k_elect=0
if features_scenarios_nested[day][24:48][i]>heating_demand[i]:
k_heat=1
list_k_heating.append(k_heat)
k_heat=0
if sum(list_k_electricity)==cluster_numbers: #This hour does not meet by any of the representative days
max_electricity_scenarios_nested_list[i].append(features_scenarios_nested[day][0:24][i])
total_electricity_scenarios.append(features_scenarios_nested[day][0:24][i])
if sum(list_k_heating)==cluster_numbers: #This hour does not meet by any of the representative days
max_heating_scenarios_nested_list[i].append(features_scenarios_nested[day][24:48][i])
total_heating_scenarios.append(features_scenarios_nested[day][24:48][i])
total_electricity_scenarios.sort(reverse=True)
total_heating_scenarios.sort(reverse=True)
max_electricity_hour = total_electricity_scenarios[35]
max_heating_hour = total_heating_scenarios[2]
#print(max_heating_hour,len(total_heating_scenarios),np.min(total_heating_scenarios),np.max(total_heating_scenarios))
design_day_heating = []
design_day_electricity = []
heating_dd = []
for i in range(24):
if len(max_electricity_scenarios_nested_list[i])==1:
design_day_electricity.append(max_electricity_scenarios_nested_list[i][0])
else:
try:
design_day_electricity.append(np.max([j for j in max_electricity_scenarios_nested_list[i] if j<max_electricity_hour]))
except:
design_day_electricity.append(0)
#print(i,len(max_heating_scenarios_nested_list[i]),max_heating_scenarios_nested_list[i])
if len(max_heating_scenarios_nested_list[i])==1:
heating_dd.append(max_heating_scenarios_nested_list[i][0])
design_day_heating.append(np.max(heating_dd))
else:
try:
heating_dd = [j for j in max_heating_scenarios_nested_list[i] if j<max_heating_hour]
design_day_heating.append(np.max(heating_dd))
except:
design_day_heating.append(0)
for i in range(24):
if design_day_electricity[i]==0:
if i==0:
design_day_electricity[i] = design_day_electricity[i+1]
elif i==23:
design_day_electricity[i] = design_day_electricity[i-1]
else:
design_day_electricity[i] = (design_day_electricity[i-1]+design_day_electricity[i+1])/2
if design_day_heating[i]==0:
if i==0:
design_day_heating[i] = design_day_heating[i+1]
elif i==23:
design_day_heating[i] = design_day_heating[i-1]
else:
design_day_heating[i] = (design_day_heating[i-1]+design_day_heating[i+1])/2
representative_day_max = {}
electricity_demand_total = defaultdict(list)
heating_demand_total = defaultdict(list)
heating_demand_max = {}
electricity_demand_max = {}
for represent in range(cluster_numbers):
representative_day_max[represent] = pd.read_csv(os.path.join(representative_days_path ,name+'Represent_days_modified_'+str(represent)+'.csv'))
electricity_demand = representative_day_max[represent]['Electricity total (kWh)'] #kWh
heating_demand = representative_day_max[represent]['Heating (kWh)'] #kWh
#hours_representative_day= round(sum_probability[representative_day]/sum(sum_probability),4)*8760
heating_demand_max[represent]= np.mean(heating_demand)
electricity_demand_max[represent]= np.mean(electricity_demand)
high_electricity_index = []
high_heating_index = []
high_electricity_value = []
high_heating_value = []
key_max_electricity=max(electricity_demand_max, key=electricity_demand_max.get)
key_max_heating=max(heating_demand_max, key=heating_demand_max.get)
for key, value in max_electricity_scenarios_nested.items():
for inner_key, inner_value in max_electricity_scenarios_nested[key].items():
if inner_value>electricity_demand_max[key_max_electricity]:
high_electricity_index.append(scenario_number[key]*365+inner_key)
high_electricity_value.append(inner_value)
for key, value in max_heating_scenarios_nested.items():
for inner_key, inner_value in max_heating_scenarios_nested[key].items():
if inner_value>heating_demand_max[key_max_heating]:
high_heating_index.append(scenario_number[key]*365+inner_key)
high_heating_value.append(inner_value)
sum_probability.append(0.5*len(total_electricity_scenarios)/len(index_label_all)*365)
sum_probability.append(len(total_heating_scenarios)/len(index_label_all)*365)
filtered_label[cluster_numbers]=len(total_electricity_scenarios)
filtered_label[cluster_numbers+1]=len(total_heating_scenarios)
representative_day = cluster_numbers
data_represent_days_modified={'Electricity total (kWh)': design_day_electricity,
'Heating (kWh)': representative_day_max[key_max_electricity]['Heating (kWh)'],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name+'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
representative_day = cluster_numbers+1
data_represent_days_modified={'Electricity total (kWh)': representative_day_max[key_max_heating]['Electricity total (kWh)'],
'Heating (kWh)': design_day_heating,
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name+'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
for representative_day in range(len(Scenario_generated_new)):
represent_gaps = {}
scenario_data = {}
for i in range(48): #24*5=120 features in each day
if Scenario_generated_new[representative_day][i]<0:
Scenario_generated_new[representative_day][i] = 0
for k in range(2): # 2 uncertain inputs
scenario_data[k] = Scenario_generated_new[representative_day][24*k:24*(k+1)].copy()
#min_non_z = np.min(np.nonzero(scenario_data[k]))
#zmax_non_z = np.max(np.nonzero(scenario_data[k]))
#represent_gaps[k]= [i for i, x in enumerate(scenario_data[k][min_non_z:max_non_z+1]) if x == 0]
#ranges = sum((list(t) for t in zip(represent_gaps[k], represent_gaps[k][1:]) if t[0]+1 != t[1]), [])
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#print('Present gaps are: ', representative_day,k, 'gaps', ', '.join([str(n) + '-' + str(next(iranges)) for n in iranges]))
#iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:])
#for n in iranges:
# next_n = next(iranges)
# if (next_n-n) == 0: #for data gaps of 1 hour, get the average value
# scenario_data[k][n+min_non_z] = (scenario_data[k][min_non_z+n+1]+scenario_data[k][min_non_z+n-1])/2
# elif (next_n-n) > 0 and (next_n-n) <= 6: #for data gaps of 1 hour to 4 hr, use interpolation and extrapolation
# f_interpol_short= interp1d([n-1,next_n+1], [scenario_data[k][min_non_z+n-1],scenario_data[k][min_non_z+next_n+1]])
# for m in range(n,next_n+1):
# scenario_data[k][m+min_non_z] = f_interpol_short(m)
data_represent_days_modified={'Electricity total (kWh)': scenario_data[0],
'Heating (kWh)': scenario_data[1],
'Percent %': round(sum_probability[representative_day]*100/sum(sum_probability),4)}
#print(np.mean(Scenario_generated_new[representative_day][0:24]))
df_represent_days_modified=pd.DataFrame(data_represent_days_modified)
df_represent_days_modified.to_csv(os.path.join(representative_days_path,name + 'Represent_days_modified_'+str(representative_day)+ '.csv'), index=False)
all_representative_days = clustring_kmean_forced.kmedoid_clusters(path_test,scenario_genrated,name)[2]
represent_day = defaultdict(list)
k=0
days= 365
for represent in range(int(editable_data['Cluster numbers'])+2):
for day in range(days):
data = scenario_genrated[day*24:(day+1)*24]
data_1 = data['Total Electricity']
data_2 = data['Total Heating']
#Total electricity and heating
daily_list =list(chain(data_1.astype('float', copy=False),data_2.astype('float', copy=False)))
#if round(all_representative_days[represent]['Electricity total (kWh)'][10],0)==round(daily_list[10],0):
# print('elect',represent, day, round(all_representative_days[represent]['Electricity total (kWh)'][10],0),round(daily_list[10],0))
#if round(all_representative_days[represent]['Heating (kWh)'][6],0)==round(daily_list[30],0):
# print('heat',represent, day, round(all_representative_days[represent]['Heating (kWh)'][6],0),round(daily_list[30],0))
if round(all_representative_days[represent]['Electricity total (kWh)'][10],0)==round(daily_list[10],0) and round(all_representative_days[represent]['Heating (kWh)'][6],0)==round(daily_list[30],0) :
represent_day[represent] = day
data_temp = []
data_dni = []
data_ghi = []
data_dhi = []
data_wind_speed = []
poa_components_vector = []
poa_global = []
hour = 0
for index_in_year in range(day*24,(day+1)*24):
data_temp.append(weather_data['temp_air'].tolist()[index_in_year])
data_dni.append(weather_data['dni'].tolist()[index_in_year])
data_ghi.append(weather_data['ghi'].tolist()[index_in_year])
data_dhi.append(weather_data['dhi'].tolist()[index_in_year])
data_wind_speed.append(weather_data['wind_speed'].tolist()[index_in_year])
dti = datetime.datetime(weather_data['year'].tolist()[index_in_year], weather_data['month'].tolist()[index_in_year], weather_data['day'].tolist()[index_in_year],hour)
solar_position = get_solarposition(dti,lat, lon, altitude, pressure=None, method='nrel_numpy', temperature=12)
solar_zenith = solar_position['zenith']
solar_azimuth = solar_position['azimuth']
poa_components_vector.append(get_total_irradiance(surf_tilt, surf_azimuth,
solar_zenith[0], solar_azimuth[0],
float(weather_data['dni'].tolist()[index_in_year]), float(weather_data['ghi'].tolist()[index_in_year]), float(weather_data['dhi'].tolist()[index_in_year]), dni_extra=None, airmass=None,
albedo=.25, surface_type=None,
model='isotropic',
model_perez='allsitescomposite1990'))
poa_global.append(poa_components_vector[hour]['poa_global'])
hour +=1
for represent in range(int(editable_data['Cluster numbers'])+2):
all_representative_days[represent]['temp_air']=data_temp
all_representative_days[represent]['dni']=data_dni
all_representative_days[represent]['ghi']=data_ghi
all_representative_days[represent]['dhi']=data_dhi
all_representative_days[represent]['wind_speed']=data_wind_speed
all_representative_days[represent]['gti']=poa_global
all_representative_days[represent].to_csv(os.path.join(representative_days_path,name + 'Represent_days_modified_'+str(represent)+ '.csv'), index=False)
break
return data_all_labels, represent_day
cluster_numbers= int(editable_data['Cluster numbers'])+2
temps= []
gtis=[]
for scenario in range(len(epw_names)):
#output_prefix = building_type+'_'+epw_names[scenario]+'_'
weather_path = os.path.join(scenarios_path,epw_names[scenario]+'.csv')
data = pd.read_csv(weather_path)
if scenario<10:
gtis.append(round(np.mean(data['GTI']),1))
#print(epw_names[scenario],'GTI',np.mean(data['GTI']))
if scenario%10==0:
#print(epw_names[scenario],'Temp',np.mean(data['Temperature']))
temps.append(round(np.mean(data['Temperature']),1))
print('gti', gtis)
print('temps',temps)
scenario_generated_main = defaultdict(list)
elect_buildings_main = defaultdict(list)
gas_buildings_main = defaultdict(list)
elect_annual_main = defaultdict(list)
gas_annual_main = defaultdict(list)
for building_type in idf_names:
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
output_prefix = building_type+'_'+epw_file_name+'_mtr.csv'
demand_data_path = os.path.join(demand_directory, output_prefix)
data = pd.read_csv(demand_data_path)
elect_data = ((data['Electricity:Facility [J](Hourly)']-data['Heating:Electricity [J](Hourly)'])*JtokWh)
heat_data = (data['Gas:Facility [J](Hourly)']*thermal_eff_dict[building_type]+data['Heating:Electricity [J](Hourly)'])*JtokWh
#print(output_prefix,elect_data,heat_data )
#data['Total Electricity']=elect_data
#data['Total Heating']=heat_data
scenario_generated_main[building_type].append(data)
elect_buildings_main[building_type].append(elect_data)
elect_annual_main[building_type].append(sum(elect_data))
gas_buildings_main[building_type].append(heat_data)
gas_annual_main[building_type].append(sum(heat_data))
j=0
for key in dict_EPWs.keys():
for epw_file_name in dict_EPWs[key]:
if key =='AMYs':
weather_path = os.path.join(scenarios_path,epw_file_name+'.epw')
data, meta = EPW_to_csv.read_epw(weather_path)
elif key =='FMYs':
weather_path = os.path.join(os.path.join(os.path.join(path_test,'Weather files'),key),epw_file_name+'.epw')
data, meta = EPW_to_csv.read_epw(weather_path,FMYs='yes')
else:
weather_path = os.path.join(os.path.join(os.path.join(path_test,'Weather files'),key),epw_file_name+'.epw')
data, meta = EPW_to_csv.read_epw(weather_path)
data.to_csv(os.path.join(scenarios_path,epw_file_name+'.csv'), index = False, header=True)
total_electricity_buildings = []
total_heating_buildings = []
for building_type in idf_names:
if mode=='seperate':
output_prefix = building_type+'_'+epw_file_name+'_'
scenario_generated_main[building_type][j]['Total Electricity']=elect_buildings_main[building_type][j]*weight_factor[building_type]
scenario_generated_main[building_type][j]['Total Heating']=gas_buildings_main[building_type][j]*weight_factor[building_type]
scenario_reduction_per_year(scenario_generated_main[building_type][j],output_prefix,data)
elif mode=='total':
#print(building_type,'elect',elect_buildings_main[building_type][j]*weight_factor[building_type])
#print(building_type,'heat',gas_buildings_main[building_type][j]*weight_factor[building_type])
total_electricity_buildings.append(elect_buildings_main[building_type][j]*weight_factor[building_type])
total_heating_buildings.append(gas_buildings_main[building_type][j]*weight_factor[building_type])
if mode=='total':
output_prefix = 'total_'+epw_file_name+'_'
scenario_generated_main[building_type][j]['Total Electricity']=sum(total_electricity_buildings)
scenario_generated_main[building_type][j]['Total Heating']=sum(total_heating_buildings)
#print('total',j,output_prefix,sum(total_electricity_buildings),sum(total_heating_buildings))
#print(total_electricity_buildings[0][15],total_electricity_buildings[1][15],total_electricity_buildings[2][15],sum(total_electricity_buildings)[15],len(sum(total_electricity_buildings)))
#print(len(scenario_generated_main[building_type][j]))
scenario_reduction_per_year(scenario_generated_main[building_type][j],output_prefix,data)
j = j+1
scenario_probability = defaultdict(list)
scenario_generated = defaultdict(list)
elect_buildings = defaultdict(list)
gas_buildings = defaultdict(list)
elect_annual= defaultdict(list)
gas_annual = defaultdict(list)
for building_type in idf_names:
for scenario in range(len(epw_names)):
output_prefix = building_type+'_'+epw_names[scenario]+'_mtr.csv'
demand_data_path = os.path.join(demand_directory, output_prefix)
data = pd.read_csv(demand_data_path)
elect_data = (data['Electricity:Facility [J](Hourly)']-data['Heating:Electricity [J](Hourly)'])*JtokWh
heat_data = (data['Gas:Facility [J](Hourly)']*thermal_eff_dict[building_type]+data['Heating:Electricity [J](Hourly)'])*JtokWh
#data['Total Electricity']=elect_data
#data['Total Heating']=heat_data
scenario_generated[building_type].append(data)
scenario_generated[building_type].append(data)
elect_buildings[building_type].append(elect_data)
elect_annual[building_type].append(sum(elect_data))
gas_buildings[building_type].append(heat_data)
gas_annual[building_type].append(sum(heat_data))
#print(scenario,output_prefix,gas_buildings[building_type][scenario][0],elect_buildings[building_type][scenario][0])
for scenario in range(len(epw_names)):
output_prefix = building_type+'_'+epw_names[scenario]+'_'
weather_path = os.path.join(scenarios_path,epw_names[scenario]+'.epw')
data, meta = EPW_to_csv.read_epw(weather_path)
data.to_csv(os.path.join(scenarios_path,epw_file_name+'.csv'), index = False, header=True)
total_electricity_buildings = []
total_heating_buildings = []
for building_type in idf_names:
if mode=='seperate':
output_prefix = building_type+'_'+epw_names[scenario]+'_'
scenario_generated[building_type][scenario]['Total Electricity']=elect_buildings[building_type][scenario]*weight_factor[building_type]
scenario_generated[building_type][scenario]['Total Heating']=gas_buildings[building_type][scenario]*weight_factor[building_type]
scenario_reduction_per_year(scenario_generated[building_type][scenario],output_prefix,data)
elif mode=='total':
total_electricity_buildings.append(elect_buildings[building_type][scenario]*weight_factor[building_type])
total_heating_buildings.append(gas_buildings[building_type][scenario]*weight_factor[building_type])
if mode=='total':
output_prefix = 'total_'+epw_names[scenario]+'_'
scenario_generated[building_type][scenario]['Total Electricity']=sum(total_electricity_buildings)
scenario_generated[building_type][scenario]['Total Heating']=sum(total_heating_buildings)
#print(scenario_generated[building_type][scenario].keys())
scenario_reduction_per_year(scenario_generated[building_type][scenario],output_prefix,data)
| en | 0.365187 | #SLC altitude m #panels tilt degree #panels azimuth degree #idf_names=idf_names[1:2] # epw main files #print(scenario_genrated) #print(data_new.keys()) #print(data_1) #print(name,i,k,data_1[15],data_2[15]) #Convert the dictionary of features to Series # if I want to search for the optimum number of clusters: 1 is yes, 0 is no # controls default text sizes # fontsize of the axes title # fontsize of the x and y labels # fontsize of the tick labels # fontsize of the tick labels # legend fontsize # fontsize of the figure title #ax.set_title('The user should use "Elbow method" to select the number of optimum clusters',fontsize=BIGGER_SIZE) #kmedoids = KMedoids(n_clusters=cluster_numbers, init="random",max_iter=1000,random_state=4).fit(scores_pca) #filter rows of original data #print(kmedoids.predict([[0,0,0], [4,4,4]])) #print(kmedoids.cluster_centers_,kmedoids.cluster_centers_[0],len(kmedoids.cluster_centers_)) #print(data_all_labels) #Reversing PCA using two methods: #Reversing the cluster centers using method 1 (their results are the same) #print('15 representative days',clusters_reverse[0][0],Scenario_generated_new[0][0],standardization_data.mean_[0],standardization_data.var_[0]) #print(len(probability_label[0])) 1990 #print(len(filtered_label[0])) 1990 # 2 uncertain inputs #min_non_z = np.min(np.nonzero(scenario_data[k])) #max_non_z = np.max(np.nonzero(scenario_data[k])) #represent_gaps[k]= [i for i, x in enumerate(scenario_data[k][min_non_z:max_non_z+1]) if x == 0] #ranges = sum((list(t) for t in zip(represent_gaps[k], represent_gaps[k][1:]) if t[0]+1 != t[1]), []) #iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:]) #print('Present gaps are: ', representative_day,k, 'gaps', ', '.join([str(n) + '-' + str(next(iranges)) for n in iranges])) #iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:]) #for n in iranges: # next_n = next(iranges) # if (next_n-n) == 0: #for data gaps of 1 hour, get the average value # 
scenario_data[k][n+min_non_z] = (scenario_data[k][min_non_z+n+1]+scenario_data[k][min_non_z+n-1])/2 # elif (next_n-n) > 0 and (next_n-n) <= 6: #for data gaps of 1 hour to 4 hr, use interpolation and extrapolation # f_interpol_short= interp1d([n-1,next_n+1], [scenario_data[k][min_non_z+n-1],scenario_data[k][min_non_z+next_n+1]]) # for m in range(n,next_n+1): # scenario_data[k][m+min_non_z] = f_interpol_short(m) #print(np.mean(Scenario_generated_new[representative_day][0:24])) #kWh #kWh #This hour does not meet by any of the representative days #This hour does not meet by any of the representative days #print(max_heating_hour,len(total_heating_scenarios),np.min(total_heating_scenarios),np.max(total_heating_scenarios)) #print(i,len(max_heating_scenarios_nested_list[i]),max_heating_scenarios_nested_list[i]) #kWh #kWh #hours_representative_day= round(sum_probability[representative_day]/sum(sum_probability),4)*8760 #24*5=120 features in each day # 2 uncertain inputs #min_non_z = np.min(np.nonzero(scenario_data[k])) #zmax_non_z = np.max(np.nonzero(scenario_data[k])) #represent_gaps[k]= [i for i, x in enumerate(scenario_data[k][min_non_z:max_non_z+1]) if x == 0] #ranges = sum((list(t) for t in zip(represent_gaps[k], represent_gaps[k][1:]) if t[0]+1 != t[1]), []) #iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:]) #print('Present gaps are: ', representative_day,k, 'gaps', ', '.join([str(n) + '-' + str(next(iranges)) for n in iranges])) #iranges = iter(represent_gaps[k][0:1] + ranges + represent_gaps[k][-1:]) #for n in iranges: # next_n = next(iranges) # if (next_n-n) == 0: #for data gaps of 1 hour, get the average value # scenario_data[k][n+min_non_z] = (scenario_data[k][min_non_z+n+1]+scenario_data[k][min_non_z+n-1])/2 # elif (next_n-n) > 0 and (next_n-n) <= 6: #for data gaps of 1 hour to 4 hr, use interpolation and extrapolation # f_interpol_short= interp1d([n-1,next_n+1], [scenario_data[k][min_non_z+n-1],scenario_data[k][min_non_z+next_n+1]]) # for 
m in range(n,next_n+1): # scenario_data[k][m+min_non_z] = f_interpol_short(m) #print(np.mean(Scenario_generated_new[representative_day][0:24])) #Total electricity and heating #if round(all_representative_days[represent]['Electricity total (kWh)'][10],0)==round(daily_list[10],0): # print('elect',represent, day, round(all_representative_days[represent]['Electricity total (kWh)'][10],0),round(daily_list[10],0)) #if round(all_representative_days[represent]['Heating (kWh)'][6],0)==round(daily_list[30],0): # print('heat',represent, day, round(all_representative_days[represent]['Heating (kWh)'][6],0),round(daily_list[30],0)) #output_prefix = building_type+'_'+epw_names[scenario]+'_' #print(epw_names[scenario],'GTI',np.mean(data['GTI'])) #print(epw_names[scenario],'Temp',np.mean(data['Temperature'])) #print(output_prefix,elect_data,heat_data ) #data['Total Electricity']=elect_data #data['Total Heating']=heat_data #print(building_type,'elect',elect_buildings_main[building_type][j]*weight_factor[building_type]) #print(building_type,'heat',gas_buildings_main[building_type][j]*weight_factor[building_type]) #print('total',j,output_prefix,sum(total_electricity_buildings),sum(total_heating_buildings)) #print(total_electricity_buildings[0][15],total_electricity_buildings[1][15],total_electricity_buildings[2][15],sum(total_electricity_buildings)[15],len(sum(total_electricity_buildings))) #print(len(scenario_generated_main[building_type][j])) #data['Total Electricity']=elect_data #data['Total Heating']=heat_data #print(scenario,output_prefix,gas_buildings[building_type][scenario][0],elect_buildings[building_type][scenario][0]) #print(scenario_generated[building_type][scenario].keys()) | 2.049795 | 2 |
Particle_Logging.py | sharp275/Public-Environmental-Monitor | 0 | 6612274 | <reponame>sharp275/Public-Environmental-Monitor<gh_stars>0
#Obtain data from SDS011 and upload to InfluxDB Cloud and TagoIO
from sds011 import SDS011
from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS
import requests
#########################################################
# USER-EDITABLE SETTINGS
# How often to measure and read data in minutes (integer between 0 and 30, 0 for continous):
measurement_period = 5
#SDS011 port connection
PORT = "/dev/ttyUSB0"
#InfluxDB Cloud settings
token = ""
org = ""
bucket = ""
InfluxDB_URL = ""
host = ""
#Tago.io settings
TAGO_DEVICE_TOKEN_STRING = ""
# END OF USER-EDITABLE SETTINGS
sds = SDS011(port=PORT)
sds.set_working_period(rate=measurement_period)#one measurment every x minutes offers decent granularity and at least a few years of lifetime to the SDS011
while (True):
try:
measurement = sds.read_measurement()
#InfluxDB Cloud Logging
with InfluxDBClient(url=InfluxDB_URL, token=token, org=org) as client:
write_api = client.write_api(write_options=SYNCHRONOUS)
data = f"env,host={host} pm2.5={measurement['pm2.5']:.1f},pm10={measurement['pm10']:.1f}"
write_api.write(bucket, org, data)
#Start Tago logging
tago_url = "http://api.tago.io/data"
tago_header = {"Content-type": "application/json","Device-Token":TAGO_DEVICE_TOKEN_STRING}
try:
payload = [0]*2;
payload[0] = {"variable":"pm25","value":f"{measurement['pm2.5']:.1f}"}
payload[1] = {"variable":"pm10","value":f"{measurement['pm10']:.1f}"}
requests.post(tago_url, json=payload, headers=tago_header, timeout=2)
except Exception as e:
# An error has occurred, likely due to a lost internet connection,
# and the post has failed.
# The program will retry with the next data release and will succeed
# if the internet reconnects.
print("HTTP POST failed with the following error:")
print(repr(e))
print("The program will continue and retry on the next data output.")
except KeyboardInterrupt:
sds.__del__()
| #Obtain data from SDS011 and upload to InfluxDB Cloud and TagoIO
from sds011 import SDS011
from influxdb_client import InfluxDBClient, Point, WritePrecision
from influxdb_client.client.write_api import SYNCHRONOUS
import requests
#########################################################
# USER-EDITABLE SETTINGS
# How often to measure and read data in minutes (integer between 0 and 30, 0 for continous):
measurement_period = 5
#SDS011 port connection
PORT = "/dev/ttyUSB0"
#InfluxDB Cloud settings
token = ""
org = ""
bucket = ""
InfluxDB_URL = ""
host = ""
#Tago.io settings
TAGO_DEVICE_TOKEN_STRING = ""
# END OF USER-EDITABLE SETTINGS
sds = SDS011(port=PORT)
sds.set_working_period(rate=measurement_period)#one measurment every x minutes offers decent granularity and at least a few years of lifetime to the SDS011
while (True):
try:
measurement = sds.read_measurement()
#InfluxDB Cloud Logging
with InfluxDBClient(url=InfluxDB_URL, token=token, org=org) as client:
write_api = client.write_api(write_options=SYNCHRONOUS)
data = f"env,host={host} pm2.5={measurement['pm2.5']:.1f},pm10={measurement['pm10']:.1f}"
write_api.write(bucket, org, data)
#Start Tago logging
tago_url = "http://api.tago.io/data"
tago_header = {"Content-type": "application/json","Device-Token":TAGO_DEVICE_TOKEN_STRING}
try:
payload = [0]*2;
payload[0] = {"variable":"pm25","value":f"{measurement['pm2.5']:.1f}"}
payload[1] = {"variable":"pm10","value":f"{measurement['pm10']:.1f}"}
requests.post(tago_url, json=payload, headers=tago_header, timeout=2)
except Exception as e:
# An error has occurred, likely due to a lost internet connection,
# and the post has failed.
# The program will retry with the next data release and will succeed
# if the internet reconnects.
print("HTTP POST failed with the following error:")
print(repr(e))
print("The program will continue and retry on the next data output.")
except KeyboardInterrupt:
sds.__del__() | en | 0.866756 | #Obtain data from SDS011 and upload to InfluxDB Cloud and TagoIO ######################################################### # USER-EDITABLE SETTINGS # How often to measure and read data in minutes (integer between 0 and 30, 0 for continous): #SDS011 port connection #InfluxDB Cloud settings #Tago.io settings # END OF USER-EDITABLE SETTINGS #one measurment every x minutes offers decent granularity and at least a few years of lifetime to the SDS011 #InfluxDB Cloud Logging #Start Tago logging # An error has occurred, likely due to a lost internet connection, # and the post has failed. # The program will retry with the next data release and will succeed # if the internet reconnects. | 2.478268 | 2 |
committees/urls.py | jonting/volmun | 0 | 6612275 | from django.urls import path
from . import views
app_name = 'committees'
urlpatterns = [
path('', views.IndexView.as_view(), name='committees'),
path('<slug:slug>/', views.DetailView.as_view(), name='committee_detail'),
]
| from django.urls import path
from . import views
app_name = 'committees'
urlpatterns = [
path('', views.IndexView.as_view(), name='committees'),
path('<slug:slug>/', views.DetailView.as_view(), name='committee_detail'),
]
| none | 1 | 1.774375 | 2 | |
app.py | katalyzator/flask_cors_image_proxy | 0 | 6612276 | import requests
from flask import Flask, request
app = Flask(__name__)
@app.route('/')
def insta_proxy():
insta_url = request.query_string.decode('utf-8').replace('url=', '')
response = requests.get(insta_url, stream=True)
response.headers.pop('cross-origin-resource-policy')
return response.content, response.status_code, response.headers.items()
if __name__ == '__main__':
app.run()
| import requests
from flask import Flask, request
app = Flask(__name__)
@app.route('/')
def insta_proxy():
insta_url = request.query_string.decode('utf-8').replace('url=', '')
response = requests.get(insta_url, stream=True)
response.headers.pop('cross-origin-resource-policy')
return response.content, response.status_code, response.headers.items()
if __name__ == '__main__':
app.run()
| none | 1 | 2.354226 | 2 | |
bin/viz_diffenator.py | graphicore/diffbrowsers | 9 | 6612277 | """
Visualize any differences found with fontdiffenator
"""
import argparse
from diffbrowsers.gfregression import GF_PRODUCTION_URL, VIEWS
from diffbrowsers.diffbrowsers import DiffBrowsers
from diffbrowsers.browsers import test_browsers
import os
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
    """Generate diff images for the views where fontdiffenator found changes."""
    # CLI: positional "after" fonts plus exactly one of --fonts-before or
    # --from-googlefonts (mutually exclusive, one required).
    parser = argparse.ArgumentParser()
    parser.add_argument('fonts_after', nargs="+", help="Fonts after paths")
    before_group = parser.add_argument_group(title="Fonts before input")
    before_input_group = before_group.add_mutually_exclusive_group(required=True)
    before_input_group.add_argument('-fb', '--fonts-before', nargs="+",
                        help="Fonts before paths")
    before_input_group.add_argument('-gf', '--from-googlefonts', action='store_true',
                        help="Diff against GoogleFonts instead of fonts_before")
    parser.add_argument('-u', '--gfr-url', default=GF_PRODUCTION_URL,
                        help="Url to GFR instance")
    parser.add_argument('-l', '--gfr-local', action="store_true", default=False)
    parser.add_argument('-o', '--output-dir', help="Directory for output images",
                        required=True)
    args = parser.parse_args()
    # NOTE(review): browser set is hard-coded to latest Safari — presumably
    # deliberate for this script; confirm before generalizing.
    browsers_to_test = test_browsers['safari_latest']
    diffbrowsers = DiffBrowsers(gfr_instance_url=args.gfr_url,
                                gfr_is_local=args.gfr_local,
                                dst_dir=args.output_dir,
                                browsers=browsers_to_test)
    # The GFR session accepts the literal string 'from-googlefonts' in place
    # of explicit "before" font paths.
    fonts_before = 'from-googlefonts' if args.from_googlefonts \
            else args.fonts_before
    diffbrowsers.new_session(fonts_before, args.fonts_after)
    # Ask the regression service which views actually differ, then render
    # screenshots only for the recognised ones.
    views_to_diff = diffbrowsers.gf_regression.info['diffs']
    logger.info("Following diffs have been found [%s]. Genning images." % ', '.join(views_to_diff))
    for view in views_to_diff:
        logger.info("Generating images for {}".format(view))
        if view not in VIEWS:
            logger.info("Skipping view {}".format(view))
        else:
            diffbrowsers.diff_view(view, pt=32)
    logger.info("Images saved to {}".format(args.output_dir))
if __name__ == '__main__':
main()
| """
Visualize any differences found with fontdiffenator
"""
import argparse
from diffbrowsers.gfregression import GF_PRODUCTION_URL, VIEWS
from diffbrowsers.diffbrowsers import DiffBrowsers
from diffbrowsers.browsers import test_browsers
import os
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
    """Generate diff images for the views where fontdiffenator found changes."""
    # CLI: positional "after" fonts plus exactly one of --fonts-before or
    # --from-googlefonts (mutually exclusive, one required).
    parser = argparse.ArgumentParser()
    parser.add_argument('fonts_after', nargs="+", help="Fonts after paths")
    before_group = parser.add_argument_group(title="Fonts before input")
    before_input_group = before_group.add_mutually_exclusive_group(required=True)
    before_input_group.add_argument('-fb', '--fonts-before', nargs="+",
                        help="Fonts before paths")
    before_input_group.add_argument('-gf', '--from-googlefonts', action='store_true',
                        help="Diff against GoogleFonts instead of fonts_before")
    parser.add_argument('-u', '--gfr-url', default=GF_PRODUCTION_URL,
                        help="Url to GFR instance")
    parser.add_argument('-l', '--gfr-local', action="store_true", default=False)
    parser.add_argument('-o', '--output-dir', help="Directory for output images",
                        required=True)
    args = parser.parse_args()
    # NOTE(review): browser set is hard-coded to latest Safari — presumably
    # deliberate for this script; confirm before generalizing.
    browsers_to_test = test_browsers['safari_latest']
    diffbrowsers = DiffBrowsers(gfr_instance_url=args.gfr_url,
                                gfr_is_local=args.gfr_local,
                                dst_dir=args.output_dir,
                                browsers=browsers_to_test)
    # The GFR session accepts the literal string 'from-googlefonts' in place
    # of explicit "before" font paths.
    fonts_before = 'from-googlefonts' if args.from_googlefonts \
            else args.fonts_before
    diffbrowsers.new_session(fonts_before, args.fonts_after)
    # Ask the regression service which views actually differ, then render
    # screenshots only for the recognised ones.
    views_to_diff = diffbrowsers.gf_regression.info['diffs']
    logger.info("Following diffs have been found [%s]. Genning images." % ', '.join(views_to_diff))
    for view in views_to_diff:
        logger.info("Generating images for {}".format(view))
        if view not in VIEWS:
            logger.info("Skipping view {}".format(view))
        else:
            diffbrowsers.diff_view(view, pt=32)
    logger.info("Images saved to {}".format(args.output_dir))
if __name__ == '__main__':
main()
| en | 0.817358 | Visualize any differences found with fontdiffenator | 2.668441 | 3 |
zobrinth hash test.py | GimLala/ChessAI | 0 | 6612278 | <gh_stars>0
import random
zobTable = [[[random.randint(1,2**64 - 1) for i in range(12)]for j in range(8)]for k in range(8)]
'''for i in zobTable:
for j in i:
for k in j:
print k,
print
'''
def indexing(piece):
    """Map a piece character to its index in the Zobrist table.

    Upper-case letters are white pieces, lower-case are black.
    Returns -1 for anything that is not a recognised piece (e.g. '-').
    """
    # Dict lookup replaces the original 12-branch if-chain: same mapping,
    # one expression, O(1) and trivially extensible.
    piece_indices = {
        'P': 0, 'N': 1, 'B': 2, 'R': 3, 'Q': 4, 'K': 5,
        'p': 6, 'n': 7, 'b': 8, 'r': 9, 'q': 10, 'k': 11,
    }
    return piece_indices.get(piece, -1)
def computeHash(board):
    """XOR together the Zobrist keys of every piece on the 8x8 *board*."""
    h = 0
    for rank in range(8):
        for file in range(8):
            square = board[rank][file]
            if square == '-':
                continue  # empty square contributes nothing to the hash
            h ^= zobTable[rank][file][indexing(square)]
    return h
def main():
    """Demo: hash a position, make a move, and update the hash incrementally."""
    # Upper case are white pieces, lower case are black pieces,
    # '-' marks an empty square of the 8x8 board.
    board = [
        ['-', '-', '-', 'K', '-', '-', '-', '-'],
        ['-', 'R', '-', '-', '-', '-', 'Q', '-'],
        ['-', '-', '-', '-', '-', '-', '-', '-'],
        ['-', 'P', '-', '-', '-', '-', 'p', '-'],
        ['-', '-', '-', '-', '-', 'p', '-', '-'],
        ['-', '-', '-', '-', '-', '-', '-', '-'],
        ['p', '-', '-', '-', 'b', '-', '-', 'q'],
        ['-', '-', '-', '-', 'n', '-', '-', 'k']
    ]
    hashValue = computeHash(board)
    print("Current Board is :")
    for i in board:
        for j in i:
            print(j, end=" ")
        print()
    print("\nThe Current hash is : ",hashValue,"\n")
    # Example of how a change in game state affects the hash: relocate the
    # white rook from board[1][1] to board[3][1].  The hash is updated
    # incrementally -- XOR removes the piece from its old square and XOR
    # adds it on the new one, no full recomputation needed.
    piece = board[1][1]
    board[1][1] = '-'
    hashValue ^= zobTable[1][1][indexing(piece)]
    board[3][1] = piece
    hashValue ^= zobTable[3][1][indexing(piece)]
    print("The new board is :")
    for i in board:
        for j in i:
            print(j, end=" ")
        print()
    print("\nHash after the move is : ", hashValue, "\n")
if __name__ == "__main__":
main()
| import random
zobTable = [[[random.randint(1,2**64 - 1) for i in range(12)]for j in range(8)]for k in range(8)]
'''for i in zobTable:
for j in i:
for k in j:
print k,
print
'''
def indexing(piece):
    """Map a piece character to its index in the Zobrist table.

    Upper-case letters are white pieces, lower-case are black.
    Returns -1 for anything that is not a recognised piece (e.g. '-').
    """
    # Dict lookup replaces the original 12-branch if-chain: same mapping,
    # one expression, O(1) and trivially extensible.
    piece_indices = {
        'P': 0, 'N': 1, 'B': 2, 'R': 3, 'Q': 4, 'K': 5,
        'p': 6, 'n': 7, 'b': 8, 'r': 9, 'q': 10, 'k': 11,
    }
    return piece_indices.get(piece, -1)
def computeHash(board):
    """XOR together the Zobrist keys of every piece on the 8x8 *board*."""
    h = 0
    for rank in range(8):
        for file in range(8):
            square = board[rank][file]
            if square == '-':
                continue  # empty square contributes nothing to the hash
            h ^= zobTable[rank][file][indexing(square)]
    return h
def main():
    """Demo: hash a position, make a move, and update the hash incrementally."""
    # Upper case are white pieces, lower case are black pieces,
    # '-' marks an empty square of the 8x8 board.
    board = [
        ['-', '-', '-', 'K', '-', '-', '-', '-'],
        ['-', 'R', '-', '-', '-', '-', 'Q', '-'],
        ['-', '-', '-', '-', '-', '-', '-', '-'],
        ['-', 'P', '-', '-', '-', '-', 'p', '-'],
        ['-', '-', '-', '-', '-', 'p', '-', '-'],
        ['-', '-', '-', '-', '-', '-', '-', '-'],
        ['p', '-', '-', '-', 'b', '-', '-', 'q'],
        ['-', '-', '-', '-', 'n', '-', '-', 'k']
    ]
    hashValue = computeHash(board)
    print("Current Board is :")
    for i in board:
        for j in i:
            print(j, end=" ")
        print()
    print("\nThe Current hash is : ",hashValue,"\n")
    # Example of how a change in game state affects the hash: relocate the
    # white rook from board[1][1] to board[3][1].  The hash is updated
    # incrementally -- XOR removes the piece from its old square and XOR
    # adds it on the new one, no full recomputation needed.
    piece = board[1][1]
    board[1][1] = '-'
    hashValue ^= zobTable[1][1][indexing(piece)]
    board[3][1] = piece
    hashValue ^= zobTable[3][1][indexing(piece)]
    print("The new board is :")
    for i in board:
        for j in i:
            print(j, end=" ")
        print()
    print("\nHash after the move is : ", hashValue, "\n")
if __name__ == "__main__":
main() | en | 0.851909 | for i in zobTable: for j in i: for k in j: print k, print mapping each piece to a particular number # print board[i][j] # Upper Case are white pieces # Lower Case are black pieces # a [8][8] format board # an exaple of channge in game state and how it affects the hashes # move white Rook to at a new postion in right | 3.413072 | 3 |
katas/beta/democracy_representation.py | the-zebulan/CodeWars | 40 | 6612279 | from operator import truediv
def representation(zone_pop, rep_req):
    """Apportion *rep_req* representatives across zones by population.

    Each zone initially receives a share proportional to its population
    (rounded up only past .5, truncated otherwise, with a floor of one
    seat); any surplus or deficit versus *rep_req* is then settled one
    seat at a time against the largest or smallest current allocation.

    :param zone_pop: sequence of per-zone populations
    :param rep_req: total number of representatives to allocate
    :return: list of seats per zone, in the same order as *zone_pop*
    """
    rep_total = 0
    result = []
    population_total = sum(zone_pop)
    for population in zone_pop:
        # truediv keeps the ratio fractional under Python 2 as well.
        rep = truediv(population, population_total) * rep_req
        # Round up only past .5; otherwise truncate, with a floor of 1 seat.
        current = round(rep) if rep % 1 > 0.5 else int(rep) or 1
        rep_total += current
        result.append(current)
    diff = rep_total - rep_req
    # range() works on both Python 2 and 3; the original xrange() raised
    # NameError on Python 3 whenever an adjustment pass was needed.
    for _ in range(abs(int(diff))):
        if diff < 0:
            result[result.index(min(result))] += 1
            diff += 1
        else:
            result[result.index(max(result))] -= 1
            diff -= 1
    return result
| from operator import truediv
def representation(zone_pop, rep_req):
    """Apportion *rep_req* representatives across zones by population.

    Each zone initially receives a share proportional to its population
    (rounded up only past .5, truncated otherwise, with a floor of one
    seat); any surplus or deficit versus *rep_req* is then settled one
    seat at a time against the largest or smallest current allocation.

    :param zone_pop: sequence of per-zone populations
    :param rep_req: total number of representatives to allocate
    :return: list of seats per zone, in the same order as *zone_pop*
    """
    rep_total = 0
    result = []
    population_total = sum(zone_pop)
    for population in zone_pop:
        # truediv keeps the ratio fractional under Python 2 as well.
        rep = truediv(population, population_total) * rep_req
        # Round up only past .5; otherwise truncate, with a floor of 1 seat.
        current = round(rep) if rep % 1 > 0.5 else int(rep) or 1
        rep_total += current
        result.append(current)
    diff = rep_total - rep_req
    # range() works on both Python 2 and 3; the original xrange() raised
    # NameError on Python 3 whenever an adjustment pass was needed.
    for _ in range(abs(int(diff))):
        if diff < 0:
            result[result.index(min(result))] += 1
            diff += 1
        else:
            result[result.index(max(result))] -= 1
            diff -= 1
    return result
| en | 0.775517 | # rep = (population / population_total) * rep_req # Python 3 # current = round(rep) or 1 # Python 3 # for _ in range(abs(int(diff))): # Python 3 | 3.090819 | 3 |
lib/biocode/annotation.py | olgatsiouri1996/bio-tool | 4 | 6612280 | '''
Warning: this module requires Python 3.2 or higher
This is a set of classes to represent what I have most commonly needed when working on
dozens (eukaryotic) or hundreds (prokaryotic) of annotation projects. If you have
other attributes that you'd like to see supported, please add an 'issue' on the
biocode GitHub page.
'''
import re
class FunctionalAnnotation:
    """
    While recognizing that an enormous variety of attributes could go here in
    describing the functional annotation of a BioThing, I'm starting with those
    we most often encounter and need to be available in common output formats.
    These most common attributes are accessed by name, but the others are found
    as a dict in the 'other_attributes' property.
    Also, there's a place for having attributes like this abstracted, stored in
    ontologies, etc.  We've done all that before.  For now I'm going to try
    and hopefully enjoy the utility of having the most common properties always
    directly, and simply available.
    """
    def __init__( self, product_name=None, gene_symbol=None, go_annotations=None, ec_numbers=None, dbxrefs=None ):
        self.product_name = product_name
        self.gene_symbol = gene_symbol
        self.go_annotations = go_annotations
        self.ec_numbers = ec_numbers
        self.dbxrefs = dbxrefs
        self.other_attributes = dict()
        # Collections default to None in the signature (not []) to avoid the
        # shared-mutable-default pitfall; normalize them to fresh lists here.
        if self.go_annotations is None:
            self.go_annotations = list()
        if self.ec_numbers is None:
            self.ec_numbers = list()
        if self.dbxrefs is None:
            self.dbxrefs = list()
    def __str__(self):
        """Multi-line, human-readable dump of all annotation attributes."""
        representation = "Product name: {0}\nGene symbol : {1}\n".format(self.product_name, self.gene_symbol)
        if len(self.go_annotations) > 0:
            representation += "GO annotations:\n"
            for go_annot in self.go_annotations:
                representation += "\tGO:{0}\n".format(go_annot.go_id)
        else:
            representation += "GO annotations: None\n"
        if len(self.ec_numbers) > 0:
            representation += "EC numbers:\n"
            for ec in self.ec_numbers:
                representation += "\t{0}\n".format(ec.number)
        else:
            representation += "EC numbers: None\n"
        if len(self.dbxrefs) > 0:
            representation += "Dbxrefs:\n"
            for dbxref in self.dbxrefs:
                representation += "\t{0}:{1}\n".format(dbxref.db, dbxref.identifier)
        else:
            representation += "Dbxrefs: None\n"
        return representation
    def add_dbxref(self, dbxref):
        """
        Stores a Dbxref object within an annotation. The thing passed can either be a
        Dbxref object or string like "sourcedb:identifier" and it will be automatically
        parsed.
        """
        # isinstance() is the idiomatic type test (and honors subclasses);
        # the original compared type(...).__name__ strings.
        if isinstance(dbxref, Dbxref):
            self.dbxrefs.append(dbxref)
        elif isinstance(dbxref, str):
            # Raw string: '\:' in a non-raw literal is an invalid escape
            # sequence (SyntaxWarning on modern Python).
            m = re.match(r"(.+)\:(.+)", dbxref)
            if m:
                self.dbxrefs.append(Dbxref(db=m.group(1), identifier=m.group(2)))
            else:
                raise Exception("ERROR: Annotation.add_dbxref(): If string passed, expected format was 'source:identifier'")
        else:
            raise Exception("ERROR: Annotation.add_dbxref expected a Dbxref object or string to be passed")
    def add_ec_number(self, ec_num):
        """
        Note to self: Modify this to allow passing ECAnnotation object or string.
        Right now it expects an ECAnnotation object
        """
        self.ec_numbers.append(ec_num)
    def add_go_annotation(self, go):
        """
        Note to self: Modify this to allow passing GOAnnotation object or string.
        Right now it expects an GOAnnotation object
        """
        self.go_annotations.append(go)
class Dbxref:
    """
    A cross-reference to an entry in an external database, expressed as a
    'db' (the source database) plus an 'identifier' within it.  Examples:

       SGD:S0006169
       KEGG:K06223

    These are intended purely for identifier cross-references, not for
    linking ontology terms.

    ## notes on method signature overloading (for future use)
    jorvis: You'd write a @classmethod. I'd name it "fromstring"
    jorvis: the classmethod would parse that string into values which it would use to
            invoke the normal constructor, as appropriate.
    """
    def __init__(self, db=None, identifier=None):
        # Plain data holder: the source database name and the ID within it.
        self.db = db
        self.identifier = identifier
class GOAnnotation:
    """
    A functional annotation can have an infinite number of associated GO Annotations
    Details here:
    http://www.geneontology.org/GO.evidence.shtml
    Yes, the 'with_from' attribute name is awkward, but 'with/from' isn't legal and
    both 'with' and 'from' are python reserved words.
    The go_id attribute here is just the numeric portion without "GO" or "GO:" or
    anything else attached (any such prefix is stripped at construction time).
    Raises an Exception if the passed go_id contains no digits at all.
    """
    def __init__( self, go_id=None, ev_code=None, with_from=None ):
        self.go_id = go_id
        self.ev_code = ev_code
        self.with_from = with_from
        ## process any GO ID passed to only contain the numeric portion
        # Raw string: the plain '(\d+)' literal is an invalid escape
        # sequence (SyntaxWarning on modern Python).
        go_pattern = re.compile(r'(\d+)')
        m = go_pattern.search(self.go_id)
        if m:
            self.go_id = m.group(1)
        else:
            raise Exception("ERROR: failed to extract numeric portion of ID from new GOAnnotation")
class ECAnnotation:
    """
    A functional annotation can have an infinite number of associated EC Annotations
    Details here:
    http://www.chem.qmul.ac.uk/iubmb/enzyme/
    While the official terms for the levels are 'class', 'subclass' etc. we have to use
    different keys for the attributes since those conflict with reserved keywords in
    both python and other frameworks.
    class1 = 1       = Oxidoreductases
    class2 = 1.10    = Acting on diphenols and related substances as donors
    class3 = 1.10.3  = With oxygen as acceptor
    number = 1.10.3.2 = laccase
    Currently does not have an index of EC terms to provide other attributes which will
    be added in the future, such as:
    accepted_name = laccase
    reaction = 4 benzenediol + O2 = 4 benzosemiquinone + 2 H2O
    systematic_name = benzenediol:oxygen oxidoreductase
    CAS_registry_number = 80498-15-3
    """
    def __init__( self, number=None ):
        self.number = number
        self.class1 = None
        self.class2 = None
        self.class3 = None
        # Raw string: '\-' and '\.' in a non-raw literal are invalid escape
        # sequences (SyntaxWarning on modern Python).  Nested groups capture
        # each hierarchy level of an EC number like 1.10.3.2.
        re_pattern = re.compile(r'(((([0-9\-]+)\.[0-9\-]+)\.[0-9\-]+)\.[a-z0-9\-]+)')
        m = re_pattern.search(self.number)
        if m:
            self.class1 = m.group(4)
            self.class2 = m.group(3)
            self.class3 = m.group(2)
            self.number = m.group(1)
        else:
            raise Exception("ERROR: Attempt to add an EC number ({0}) in unrecognized format.  Expected N.N.N.N (where N can be 0-9 or a dash)".format(self.number))
| '''
Warning: this module requires Python 3.2 or higher
This is a set of classes to represent what I have most commonly needed when working on
dozens (eukaryotic) or hundreds (prokaryotic) of annotation projects. If you have
other attributes that you'd like to see supported, please add an 'issue' on the
biocode GitHub page.
'''
import re
class FunctionalAnnotation:
    """
    While recognizing that an enormous variety of attributes could go here in
    describing the functional annotation of a BioThing, I'm starting with those
    we most often encounter and need to be available in common output formats.
    These most common attributes are accessed by name, but the others are found
    as a dict in the 'other_attributes' property.
    Also, there's a place for having attributes like this abstracted, stored in
    ontologies, etc.  We've done all that before.  For now I'm going to try
    and hopefully enjoy the utility of having the most common properties always
    directly, and simply available.
    """
    def __init__( self, product_name=None, gene_symbol=None, go_annotations=None, ec_numbers=None, dbxrefs=None ):
        self.product_name = product_name
        self.gene_symbol = gene_symbol
        self.go_annotations = go_annotations
        self.ec_numbers = ec_numbers
        self.dbxrefs = dbxrefs
        self.other_attributes = dict()
        # Collections default to None in the signature (not []) to avoid the
        # shared-mutable-default pitfall; normalize them to fresh lists here.
        if self.go_annotations is None:
            self.go_annotations = list()
        if self.ec_numbers is None:
            self.ec_numbers = list()
        if self.dbxrefs is None:
            self.dbxrefs = list()
    def __str__(self):
        """Multi-line, human-readable dump of all annotation attributes."""
        representation = "Product name: {0}\nGene symbol : {1}\n".format(self.product_name, self.gene_symbol)
        if len(self.go_annotations) > 0:
            representation += "GO annotations:\n"
            for go_annot in self.go_annotations:
                representation += "\tGO:{0}\n".format(go_annot.go_id)
        else:
            representation += "GO annotations: None\n"
        if len(self.ec_numbers) > 0:
            representation += "EC numbers:\n"
            for ec in self.ec_numbers:
                representation += "\t{0}\n".format(ec.number)
        else:
            representation += "EC numbers: None\n"
        if len(self.dbxrefs) > 0:
            representation += "Dbxrefs:\n"
            for dbxref in self.dbxrefs:
                representation += "\t{0}:{1}\n".format(dbxref.db, dbxref.identifier)
        else:
            representation += "Dbxrefs: None\n"
        return representation
    def add_dbxref(self, dbxref):
        """
        Stores a Dbxref object within an annotation. The thing passed can either be a
        Dbxref object or string like "sourcedb:identifier" and it will be automatically
        parsed.
        """
        # isinstance() is the idiomatic type test (and honors subclasses);
        # the original compared type(...).__name__ strings.
        if isinstance(dbxref, Dbxref):
            self.dbxrefs.append(dbxref)
        elif isinstance(dbxref, str):
            # Raw string: '\:' in a non-raw literal is an invalid escape
            # sequence (SyntaxWarning on modern Python).
            m = re.match(r"(.+)\:(.+)", dbxref)
            if m:
                self.dbxrefs.append(Dbxref(db=m.group(1), identifier=m.group(2)))
            else:
                raise Exception("ERROR: Annotation.add_dbxref(): If string passed, expected format was 'source:identifier'")
        else:
            raise Exception("ERROR: Annotation.add_dbxref expected a Dbxref object or string to be passed")
    def add_ec_number(self, ec_num):
        """
        Note to self: Modify this to allow passing ECAnnotation object or string.
        Right now it expects an ECAnnotation object
        """
        self.ec_numbers.append(ec_num)
    def add_go_annotation(self, go):
        """
        Note to self: Modify this to allow passing GOAnnotation object or string.
        Right now it expects an GOAnnotation object
        """
        self.go_annotations.append(go)
class Dbxref:
    """
    A cross-reference to an entry in an external database, expressed as a
    'db' (the source database) plus an 'identifier' within it.  Examples:

       SGD:S0006169
       KEGG:K06223

    These are intended purely for identifier cross-references, not for
    linking ontology terms.

    ## notes on method signature overloading (for future use)
    jorvis: You'd write a @classmethod. I'd name it "fromstring"
    jorvis: the classmethod would parse that string into values which it would use to
            invoke the normal constructor, as appropriate.
    """
    def __init__(self, db=None, identifier=None):
        # Plain data holder: the source database name and the ID within it.
        self.db = db
        self.identifier = identifier
class GOAnnotation:
    """
    A functional annotation can have an infinite number of associated GO Annotations
    Details here:
    http://www.geneontology.org/GO.evidence.shtml
    Yes, the 'with_from' attribute name is awkward, but 'with/from' isn't legal and
    both 'with' and 'from' are python reserved words.
    The go_id attribute here is just the numeric portion without "GO" or "GO:" or
    anything else attached (any such prefix is stripped at construction time).
    Raises an Exception if the passed go_id contains no digits at all.
    """
    def __init__( self, go_id=None, ev_code=None, with_from=None ):
        self.go_id = go_id
        self.ev_code = ev_code
        self.with_from = with_from
        ## process any GO ID passed to only contain the numeric portion
        # Raw string: the plain '(\d+)' literal is an invalid escape
        # sequence (SyntaxWarning on modern Python).
        go_pattern = re.compile(r'(\d+)')
        m = go_pattern.search(self.go_id)
        if m:
            self.go_id = m.group(1)
        else:
            raise Exception("ERROR: failed to extract numeric portion of ID from new GOAnnotation")
class ECAnnotation:
    """
    A functional annotation can have an infinite number of associated EC Annotations
    Details here:
    http://www.chem.qmul.ac.uk/iubmb/enzyme/
    While the official terms for the levels are 'class', 'subclass' etc. we have to use
    different keys for the attributes since those conflict with reserved keywords in
    both python and other frameworks.
    class1 = 1       = Oxidoreductases
    class2 = 1.10    = Acting on diphenols and related substances as donors
    class3 = 1.10.3  = With oxygen as acceptor
    number = 1.10.3.2 = laccase
    Currently does not have an index of EC terms to provide other attributes which will
    be added in the future, such as:
    accepted_name = laccase
    reaction = 4 benzenediol + O2 = 4 benzosemiquinone + 2 H2O
    systematic_name = benzenediol:oxygen oxidoreductase
    CAS_registry_number = 80498-15-3
    """
    def __init__( self, number=None ):
        self.number = number
        self.class1 = None
        self.class2 = None
        self.class3 = None
        # Raw string: '\-' and '\.' in a non-raw literal are invalid escape
        # sequences (SyntaxWarning on modern Python).  Nested groups capture
        # each hierarchy level of an EC number like 1.10.3.2.
        re_pattern = re.compile(r'(((([0-9\-]+)\.[0-9\-]+)\.[0-9\-]+)\.[a-z0-9\-]+)')
        m = re_pattern.search(self.number)
        if m:
            self.class1 = m.group(4)
            self.class2 = m.group(3)
            self.class3 = m.group(2)
            self.number = m.group(1)
        else:
            raise Exception("ERROR: Attempt to add an EC number ({0}) in unrecognized format.  Expected N.N.N.N (where N can be 0-9 or a dash)".format(self.number))
| en | 0.879603 | Warning: this module requires Python 3.2 or higher This is a set of classes to represent what I have most commonly needed when working on dozens (eukaryotic) or hundreds (prokaryotic) of annotation projects. If you have other attributes that you'd like to see supported, please add an 'issue' on the biocode GitHub page. While recognizing that an enormous variety of attributes could go here in describing the functional annotation of a BioThing, I'm starting with those we most often encounter and need to be available in common output formats. These most common attributes are accessed by name, but the others are found as a dict in the 'other_attributes' property. Also, there's a place for having attributes like this abstracted, stored in ontologies, etc. We've done all that before. For now I'm going to try and hopefully enjoy the utility of having the most common properties always directly, and simply available. Stores a Dbxref object within an annotation. The thing passed can either be a Dbxref object or string like "sourcedb:identifier" and it will be automatically parsed. Note to self: Modify this to allow passing ECAnnotation object or string. Right now it expects an ECAnnotation object Note to self: Modify this to allow passing GOAnnotation object or string. Right now it expects an GOAnnotation object These allow for specification of an identifier in another database by ID. These are not meant to be used for Ontology term linking, but rather limited only to identifiers. Examples: SGD:S0006169 KEGG:K06223 The first part is the 'db' and the second is the 'identifier'. ## notes on method signature overloading (for future use) jorvis: You'd write a @classmethod. I'd name it "fromstring" jorvis: the classmethod would parse that string into values which it would use to invoke the normal constructor, as appropriate. 
A functional annotation can have an infinite number of associated GO Annotations Details here: http://www.geneontology.org/GO.evidence.shtml Yes, the 'with_from' attribute name is awkward, but 'with/from' isn't legal and both 'with' and 'from' are python reserved words. The go_id attribute here is just the numeric portion without "GO" or "GO:" or anything else attached (allowing the developer to define it as required.) ## process any GO ID passed to only contain the numeric portion A functional annotation can have an infinite number of associated EC Annotations Details here: http://www.chem.qmul.ac.uk/iubmb/enzyme/ While the official terms for the levels are 'class', 'subclass' etc. we have to use different keys for the attributes since those conflict with reserved keywords in both python and other frameworks. class1 = 1 = Oxidoreductases class2 = 1.10 = Acting on diphenols and related substances as donors class3 = 1.10.3 = With oxygen as acceptor number = 1.10.3.2 = laccase Currently does not have an index of EC terms to provide other attributes which will be added in the future, such as: accepted_name = laccase reaction = 4 benzenediol + O2 = 4 benzosemiquinone + 2 H2O systematic_name = benzenediol:oxygen oxidoreductase CAS_registry_number = 80498-15-3 | 2.475482 | 2 |
iwmac/views.py | vicinityh2020/iwmac-adapter-tinymesh | 1 | 6612281 | import sys
import requests
from django.conf import settings
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from .models import Sensor
# Create your views here.
@csrf_exempt
def test_event(request):
    """Poll the IWMAC plant API for parameter values and relay the response.

    POSTs a JSON-RPC 'read' request for a fixed parameter set and returns the
    upstream payload as JSON.  Returns {"error": True} with status 500 on a
    connection failure and 404 on any non-OK upstream status.
    """
    # ip = "192.168.1.200:81"
    ip = "POSTMAN_API"
    test_url = f'http://{ip}/services/iwmac_plant/parameter_values.php'
    # Lazy queryset, currently unused: kept for the pending TODO of building
    # the parameter list from the registered sensors.
    sensors = Sensor.objects.all()
    # TODO: populate params with elements
    post_data = {
        "jsonrpc": "2.0",
        "method": "read",
        "params": {
            "parameters": [
                {
                    "unit": "BAC01",
                    "element": "rt40_setpunkt_natt_tilluft_2_95"
                },
                {
                    "unit": "BAC01",
                    "element": "rt40_setpunkt_natt_tilluft_2_95"
                },
                {
                    "unit": "BAC01",
                    "element": "rt40_setpunkt_natt_tilluft_2_95"
                }
            ],
            "id": "0000",
            "token": "<PASSWORD>"
        },
        "id": 1
    }
    try:
        resp = requests.post(url=test_url, json=post_data)
    except requests.exceptions.ConnectionError as ce:
        print(ce.strerror, file=sys.stderr)
        return JsonResponse(data={"error": True}, status=500)
    if resp.status_code != requests.status_codes.codes.OK:
        return JsonResponse(data={"error": True}, status=requests.status_codes.codes.NOT_FOUND)
    # TODO: use filter(id__in(ARRAY_OF_IDS_CHANGED))
    # TODO: for all retrieved: check if timestamp > timestamp.old
    # TODO: if yes:
    # TODO:     1. store new value & timestamp.
    # TODO:     2. add to event array object
    # TODO: Publish events
    # Parse the upstream body before returning it: JsonResponse raises
    # TypeError for non-dict data unless safe=False, so the original
    # `JsonResponse(data=resp.text, ...)` (a str) failed on every success.
    try:
        payload = resp.json()
    except ValueError:
        # Upstream replied with non-JSON; wrap it so it can still be relayed.
        payload = {"result": resp.text}
    return JsonResponse(data=payload, safe=False, status=requests.status_codes.codes.OK)
@csrf_exempt
def thing_descriptor(request):
    # Serve the static Thing Description configured in Django settings.
    return JsonResponse(data=settings.THING_DESCRIPTION, status=requests.status_codes.codes.OK)
| import sys
import requests
from django.conf import settings
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from .models import Sensor
# Create your views here.
@csrf_exempt
def test_event(request):
    """Poll the IWMAC plant API for parameter values and relay the response.

    POSTs a JSON-RPC 'read' request for a fixed parameter set and returns the
    upstream payload as JSON.  Returns {"error": True} with status 500 on a
    connection failure and 404 on any non-OK upstream status.
    """
    # ip = "192.168.1.200:81"
    ip = "POSTMAN_API"
    test_url = f'http://{ip}/services/iwmac_plant/parameter_values.php'
    # Lazy queryset, currently unused: kept for the pending TODO of building
    # the parameter list from the registered sensors.
    sensors = Sensor.objects.all()
    # TODO: populate params with elements
    post_data = {
        "jsonrpc": "2.0",
        "method": "read",
        "params": {
            "parameters": [
                {
                    "unit": "BAC01",
                    "element": "rt40_setpunkt_natt_tilluft_2_95"
                },
                {
                    "unit": "BAC01",
                    "element": "rt40_setpunkt_natt_tilluft_2_95"
                },
                {
                    "unit": "BAC01",
                    "element": "rt40_setpunkt_natt_tilluft_2_95"
                }
            ],
            "id": "0000",
            "token": "<PASSWORD>"
        },
        "id": 1
    }
    try:
        resp = requests.post(url=test_url, json=post_data)
    except requests.exceptions.ConnectionError as ce:
        print(ce.strerror, file=sys.stderr)
        return JsonResponse(data={"error": True}, status=500)
    if resp.status_code != requests.status_codes.codes.OK:
        return JsonResponse(data={"error": True}, status=requests.status_codes.codes.NOT_FOUND)
    # TODO: use filter(id__in(ARRAY_OF_IDS_CHANGED))
    # TODO: for all retrieved: check if timestamp > timestamp.old
    # TODO: if yes:
    # TODO:     1. store new value & timestamp.
    # TODO:     2. add to event array object
    # TODO: Publish events
    # Parse the upstream body before returning it: JsonResponse raises
    # TypeError for non-dict data unless safe=False, so the original
    # `JsonResponse(data=resp.text, ...)` (a str) failed on every success.
    try:
        payload = resp.json()
    except ValueError:
        # Upstream replied with non-JSON; wrap it so it can still be relayed.
        payload = {"result": resp.text}
    return JsonResponse(data=payload, safe=False, status=requests.status_codes.codes.OK)
@csrf_exempt
def thing_descriptor(request):
    # Serve the static Thing Description configured in Django settings.
    return JsonResponse(data=settings.THING_DESCRIPTION, status=requests.status_codes.codes.OK)
| en | 0.450219 | # Create your views here. # ip = "192.168.1.200:81" # TODO: populate params with elements # TODO: use filter(id__in(ARRAY_OF_IDS_CHANGED)) # TODO: for all retrieved: check if timestamp > timestamp.old # TODO: if yes: # TODO: 1. store new value & timestamp. # TODO: 2. add to event array object # TODO: Publish events | 2.200794 | 2 |
pysnmp/CISCO-CBP-TC-MIB.py | agustinhenze/mibs.snmplabs.com | 11 | 6612282 | <filename>pysnmp/CISCO-CBP-TC-MIB.py<gh_stars>10-100
#
# PySNMP MIB module CISCO-CBP-TC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-CBP-TC-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:35:21 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Integer32, MibIdentifier, ModuleIdentity, Bits, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, NotificationType, iso, TimeTicks, ObjectIdentity, IpAddress, Unsigned32, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "MibIdentifier", "ModuleIdentity", "Bits", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "NotificationType", "iso", "TimeTicks", "ObjectIdentity", "IpAddress", "Unsigned32", "Counter32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ciscoCbpTcMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 662))
ciscoCbpTcMIB.setRevisions(('2008-06-24 00:00',))
if mibBuilder.loadTexts: ciscoCbpTcMIB.setLastUpdated('200806240000Z')
if mibBuilder.loadTexts: ciscoCbpTcMIB.setOrganization('Cisco Systems, Inc.')
class CbpElementName(TextualConvention, OctetString):
    # Name of a CBP element: octet string of 0..127 bytes, rendered as
    # ASCII text (display hint '127a').  pysmi-generated; edit with care.
    reference = "<NAME>, <NAME>, <NAME>, 'An Architecture for Describing Simple Network Management Protocol (SNMP) Management Frameworks', RFC-3411, December 2002."
    status = 'current'
    displayHint = '127a'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 127)
class CbpElementIdentifier(TextualConvention, Unsigned32):
    # Non-zero numeric identifier of a CBP element (1..4294967295).
    status = 'current'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 4294967295)
class CbpElementIdentifierOrZero(TextualConvention, Unsigned32):
    # Like CbpElementIdentifier but 0 is also allowed (presumably meaning
    # 'none/unspecified' -- confirm against the published MIB text).
    status = 'current'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 4294967295)
class CbpInstanceIdentifier(TextualConvention, Unsigned32):
    # Non-zero numeric identifier of a CBP element instance (1..4294967295).
    status = 'current'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 4294967295)
class CbpInstanceIdentifierOrZero(TextualConvention, Unsigned32):
    # Like CbpInstanceIdentifier but 0 is also allowed (presumably meaning
    # 'none/unspecified' -- confirm against the published MIB text).
    status = 'current'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 4294967295)
class CbpExecutionPriority(TextualConvention, Unsigned32):
    # Relative execution priority, 1..4294967295 (ordering semantics are
    # defined by the MIB text, not visible here).
    status = 'current'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 4294967295)
class CbpExecutionStrategy(TextualConvention, Integer32):
    # Enumerated execution strategy: other(1), doUntilSuccess(2),
    # doUntilFailure(3), doAll(4).
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
    namedValues = NamedValues(("other", 1), ("doUntilSuccess", 2), ("doUntilFailure", 3), ("doAll", 4))
mibBuilder.exportSymbols("CISCO-CBP-TC-MIB", ciscoCbpTcMIB=ciscoCbpTcMIB, CbpElementIdentifierOrZero=CbpElementIdentifierOrZero, CbpExecutionStrategy=CbpExecutionStrategy, CbpElementName=CbpElementName, CbpElementIdentifier=CbpElementIdentifier, CbpInstanceIdentifier=CbpInstanceIdentifier, CbpInstanceIdentifierOrZero=CbpInstanceIdentifierOrZero, CbpExecutionPriority=CbpExecutionPriority, PYSNMP_MODULE_ID=ciscoCbpTcMIB)
| <filename>pysnmp/CISCO-CBP-TC-MIB.py<gh_stars>10-100
#
# PySNMP MIB module CISCO-CBP-TC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-CBP-TC-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:35:21 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Integer32, MibIdentifier, ModuleIdentity, Bits, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, NotificationType, iso, TimeTicks, ObjectIdentity, IpAddress, Unsigned32, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "MibIdentifier", "ModuleIdentity", "Bits", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "NotificationType", "iso", "TimeTicks", "ObjectIdentity", "IpAddress", "Unsigned32", "Counter32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ciscoCbpTcMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 662))
ciscoCbpTcMIB.setRevisions(('2008-06-24 00:00',))
if mibBuilder.loadTexts: ciscoCbpTcMIB.setLastUpdated('200806240000Z')
if mibBuilder.loadTexts: ciscoCbpTcMIB.setOrganization('Cisco Systems, Inc.')
class CbpElementName(TextualConvention, OctetString):
reference = "<NAME>, <NAME>, <NAME>, 'An Architecture for Describing Simple Network Management Protocol (SNMP) Management Frameworks', RFC-3411, December 2002."
status = 'current'
displayHint = '127a'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 127)
class CbpElementIdentifier(TextualConvention, Unsigned32):
status = 'current'
subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 4294967295)
class CbpElementIdentifierOrZero(TextualConvention, Unsigned32):
status = 'current'
subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 4294967295)
class CbpInstanceIdentifier(TextualConvention, Unsigned32):
status = 'current'
subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 4294967295)
class CbpInstanceIdentifierOrZero(TextualConvention, Unsigned32):
status = 'current'
subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 4294967295)
class CbpExecutionPriority(TextualConvention, Unsigned32):
status = 'current'
subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 4294967295)
class CbpExecutionStrategy(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("other", 1), ("doUntilSuccess", 2), ("doUntilFailure", 3), ("doAll", 4))
mibBuilder.exportSymbols("CISCO-CBP-TC-MIB", ciscoCbpTcMIB=ciscoCbpTcMIB, CbpElementIdentifierOrZero=CbpElementIdentifierOrZero, CbpExecutionStrategy=CbpExecutionStrategy, CbpElementName=CbpElementName, CbpElementIdentifier=CbpElementIdentifier, CbpInstanceIdentifier=CbpInstanceIdentifier, CbpInstanceIdentifierOrZero=CbpInstanceIdentifierOrZero, CbpExecutionPriority=CbpExecutionPriority, PYSNMP_MODULE_ID=ciscoCbpTcMIB)
| en | 0.401617 | # # PySNMP MIB module CISCO-CBP-TC-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-CBP-TC-MIB # Produced by pysmi-0.3.4 at Mon Apr 29 17:35:21 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # | 1.662758 | 2 |
pipelines/abalone_preparers.py | leanguardia/msc-project-pipelines | 0 | 6612283 | <reponame>leanguardia/msc-project-pipelines
import numpy as np
from pipelines.transformation import dummify
from pipelines.preparer import Preparer
from pipelines.abalone_schema import abalone_schema
class AbalonePreparerETL(Preparer):
def __init__(self):
super(AbalonePreparerETL, self).__init__(abalone_schema)
self.input_validator.add_validators(self.schema.validators(which='input'))
def prepare(self, data):
df = super(AbalonePreparerETL, self).prepare(data)
self.input_validator.validate(df)
df['age'] = df['rings'] + 1.5
df = dummify(df, 'sex', ['M','F','I'])
return df
class AbalonePreparer(Preparer):
def __init__(self):
super(AbalonePreparer, self).__init__(abalone_schema)
def prepare(self, data):
df = super(AbalonePreparer, self).prepare(data)
df = dummify(df, 'sex', ['M','F','I'])
selected_features = ['length', 'diameter', 'height', 'whole_weight',
'shucked_weight', 'viscera_weight', 'shell_weight', 'M', 'F']
return df[selected_features].copy()
| import numpy as np
from pipelines.transformation import dummify
from pipelines.preparer import Preparer
from pipelines.abalone_schema import abalone_schema
class AbalonePreparerETL(Preparer):
def __init__(self):
super(AbalonePreparerETL, self).__init__(abalone_schema)
self.input_validator.add_validators(self.schema.validators(which='input'))
def prepare(self, data):
df = super(AbalonePreparerETL, self).prepare(data)
self.input_validator.validate(df)
df['age'] = df['rings'] + 1.5
df = dummify(df, 'sex', ['M','F','I'])
return df
class AbalonePreparer(Preparer):
def __init__(self):
super(AbalonePreparer, self).__init__(abalone_schema)
def prepare(self, data):
df = super(AbalonePreparer, self).prepare(data)
df = dummify(df, 'sex', ['M','F','I'])
selected_features = ['length', 'diameter', 'height', 'whole_weight',
'shucked_weight', 'viscera_weight', 'shell_weight', 'M', 'F']
return df[selected_features].copy() | none | 1 | 2.602748 | 3 | |
models/custom_time_zone.py | MIchaelMainer/msgraph-v10-models-python | 1 | 6612284 | # -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.standard_time_zone_offset import StandardTimeZoneOffset
from ..model.daylight_time_zone_offset import DaylightTimeZoneOffset
from ..one_drive_object_base import OneDriveObjectBase
class CustomTimeZone(OneDriveObjectBase):
def __init__(self, prop_dict={}):
self._prop_dict = prop_dict
@property
def bias(self):
"""Gets and sets the bias
Returns:
int:
The bias
"""
if "bias" in self._prop_dict:
return self._prop_dict["bias"]
else:
return None
@bias.setter
def bias(self, val):
self._prop_dict["bias"] = val
@property
def standard_offset(self):
"""
Gets and sets the standardOffset
Returns:
:class:`StandardTimeZoneOffset<onedrivesdk.model.standard_time_zone_offset.StandardTimeZoneOffset>`:
The standardOffset
"""
if "standardOffset" in self._prop_dict:
if isinstance(self._prop_dict["standardOffset"], OneDriveObjectBase):
return self._prop_dict["standardOffset"]
else :
self._prop_dict["standardOffset"] = StandardTimeZoneOffset(self._prop_dict["standardOffset"])
return self._prop_dict["standardOffset"]
return None
@standard_offset.setter
def standard_offset(self, val):
self._prop_dict["standardOffset"] = val
@property
def daylight_offset(self):
"""
Gets and sets the daylightOffset
Returns:
:class:`DaylightTimeZoneOffset<onedrivesdk.model.daylight_time_zone_offset.DaylightTimeZoneOffset>`:
The daylightOffset
"""
if "daylightOffset" in self._prop_dict:
if isinstance(self._prop_dict["daylightOffset"], OneDriveObjectBase):
return self._prop_dict["daylightOffset"]
else :
self._prop_dict["daylightOffset"] = DaylightTimeZoneOffset(self._prop_dict["daylightOffset"])
return self._prop_dict["daylightOffset"]
return None
@daylight_offset.setter
def daylight_offset(self, val):
self._prop_dict["daylightOffset"] = val
| # -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.standard_time_zone_offset import StandardTimeZoneOffset
from ..model.daylight_time_zone_offset import DaylightTimeZoneOffset
from ..one_drive_object_base import OneDriveObjectBase
class CustomTimeZone(OneDriveObjectBase):
def __init__(self, prop_dict={}):
self._prop_dict = prop_dict
@property
def bias(self):
"""Gets and sets the bias
Returns:
int:
The bias
"""
if "bias" in self._prop_dict:
return self._prop_dict["bias"]
else:
return None
@bias.setter
def bias(self, val):
self._prop_dict["bias"] = val
@property
def standard_offset(self):
"""
Gets and sets the standardOffset
Returns:
:class:`StandardTimeZoneOffset<onedrivesdk.model.standard_time_zone_offset.StandardTimeZoneOffset>`:
The standardOffset
"""
if "standardOffset" in self._prop_dict:
if isinstance(self._prop_dict["standardOffset"], OneDriveObjectBase):
return self._prop_dict["standardOffset"]
else :
self._prop_dict["standardOffset"] = StandardTimeZoneOffset(self._prop_dict["standardOffset"])
return self._prop_dict["standardOffset"]
return None
@standard_offset.setter
def standard_offset(self, val):
self._prop_dict["standardOffset"] = val
@property
def daylight_offset(self):
"""
Gets and sets the daylightOffset
Returns:
:class:`DaylightTimeZoneOffset<onedrivesdk.model.daylight_time_zone_offset.DaylightTimeZoneOffset>`:
The daylightOffset
"""
if "daylightOffset" in self._prop_dict:
if isinstance(self._prop_dict["daylightOffset"], OneDriveObjectBase):
return self._prop_dict["daylightOffset"]
else :
self._prop_dict["daylightOffset"] = DaylightTimeZoneOffset(self._prop_dict["daylightOffset"])
return self._prop_dict["daylightOffset"]
return None
@daylight_offset.setter
def daylight_offset(self, val):
self._prop_dict["daylightOffset"] = val
| en | 0.548757 | # -*- coding: utf-8 -*- # Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information. # # This file was generated and any changes will be overwritten. Gets and sets the bias Returns: int: The bias Gets and sets the standardOffset Returns: :class:`StandardTimeZoneOffset<onedrivesdk.model.standard_time_zone_offset.StandardTimeZoneOffset>`: The standardOffset Gets and sets the daylightOffset Returns: :class:`DaylightTimeZoneOffset<onedrivesdk.model.daylight_time_zone_offset.DaylightTimeZoneOffset>`: The daylightOffset | 2.290213 | 2 |
master/rabbitvcs-master/rabbitvcs-master/rabbitvcs/ui/branches.py | AlexRogalskiy/DevArtifacts | 4 | 6612285 | <gh_stars>1-10
from __future__ import absolute_import
#
# This is an extension to the Nautilus file manager to allow better
# integration with the Subversion source control system.
#
# Copyright (C) 2006-2008 by <NAME> <<EMAIL>>
# Copyright (C) 2007-2008 by <NAME> <<EMAIL>>
# Copyright (C) 2008-2010 by <NAME> <<EMAIL>>
#
# RabbitVCS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RabbitVCS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RabbitVCS; If not, see <http://www.gnu.org/licenses/>.
#
import os
import pygtk
import gobject
import gtk
import pango
from datetime import datetime
import time
from rabbitvcs.ui import InterfaceView
from rabbitvcs.ui.action import GitAction
from rabbitvcs.ui.log import log_dialog_factory
import rabbitvcs.ui.widget
from rabbitvcs.ui.dialog import DeleteConfirmation
import rabbitvcs.util.helper
import rabbitvcs.vcs
from xml.sax import saxutils
from rabbitvcs import gettext
import six
_ = gettext.gettext
STATE_ADD = 0
STATE_EDIT = 1
class GitBranchManager(InterfaceView):
"""
Provides a UI interface to manage items
"""
state = STATE_ADD
def __init__(self, path, revision=""):
InterfaceView.__init__(self, "manager", "Manager")
self.path = path
self.get_widget("right_side").show()
self.get_widget("Manager").set_size_request(695, -1)
self.get_widget("Manager").set_title(_("Branch Manager"))
self.get_widget("items_label").set_markup(_("<b>Branches</b>"))
self.vcs = rabbitvcs.vcs.VCS()
self.git = self.vcs.git(path)
self.revision = self.git.revision(revision)
self.selected_branch = None
self.items_treeview = rabbitvcs.ui.widget.Table(
self.get_widget("items_treeview"),
[rabbitvcs.ui.widget.TYPE_MARKUP],
[_("Branch")],
callbacks={
"mouse-event": self.on_treeview_mouse_event,
"key-event": self.on_treeview_key_event
}
)
self.initialize_detail()
self.load()
if self.revision:
revision_branches = self.git.branch_list(self.revision)
if revision_branches:
self.show_edit(revision_branches[0].name)
else:
self.show_add()
else:
self.show_add()
def initialize_detail(self):
self.detail_container = self.get_widget("detail_container")
vbox = gtk.VBox(False, 6)
# Set up the Branch line
label = gtk.Label(_("Name:"))
label.set_size_request(90, -1)
label.set_properties(xalign=0,yalign=.5)
self.branch_entry = gtk.Entry()
self.branch_name_container = gtk.HBox(False, 0)
self.branch_name_container.pack_start(label, False, False, 0)
self.branch_name_container.pack_start(self.branch_entry, False, False, 0)
vbox.pack_start(self.branch_name_container, False, False, 0)
# Set up the Commit-sha line
label = gtk.Label(_("Start Point:"))
label.set_size_request(90, -1)
label.set_properties(xalign=0,yalign=.5)
self.start_point_entry = gtk.Entry()
self.start_point_entry.set_size_request(300, -1)
self.start_point_container = gtk.HBox(False, 0)
self.log_dialog_button = gtk.Button()
self.log_dialog_button.connect("clicked", self.on_log_dialog_button_clicked)
image = gtk.Image()
image.set_from_icon_name("rabbitvcs-show_log", 2)
self.log_dialog_button.set_image(image)
self.start_point_container.pack_start(label, False, False, 0)
self.start_point_container.pack_start(self.start_point_entry, False, False, 0)
self.start_point_container.pack_start(self.log_dialog_button, False, False, 0)
vbox.pack_start(self.start_point_container, False, False, 0)
# Set up the Track line
label = gtk.Label("")
label.set_size_request(90, -1)
self.track_checkbox = gtk.CheckButton(_("Keep old branch's history"))
self.track_container = gtk.HBox(False, 0)
self.track_container.pack_start(label, False, False, 0)
self.track_container.pack_start(self.track_checkbox, False, False, 0)
vbox.pack_start(self.track_container, False, False, 0)
# Set up the checkout line
label = gtk.Label("")
label.set_size_request(90, -1)
self.checkout_checkbox = gtk.CheckButton(_("Set as active branch"))
self.checkout_container = gtk.HBox(False, 0)
self.checkout_container.pack_start(label, False, False, 0)
self.checkout_container.pack_start(self.checkout_checkbox, False, False, 0)
vbox.pack_start(self.checkout_container, False, False, 0)
# Set up Save button
label = gtk.Label("")
label.set_size_request(90, -1)
self.save_button = gtk.Button(label=_("Save"))
self.save_button.connect("clicked", self.on_save_clicked)
self.save_container = gtk.HBox(False, 0)
self.save_container.pack_start(label, False, False, 0)
self.save_container.pack_start(self.save_button, False, False, 0)
vbox.pack_start(self.save_container, False, False, 0)
# Set up the Revision line
label = gtk.Label(_("Revision:"))
label.set_size_request(90, -1)
label.set_properties(xalign=0,yalign=0)
self.revision_label = gtk.Label("")
self.revision_label.set_properties(xalign=0,selectable=True)
self.revision_label.set_line_wrap(True)
self.revision_container = gtk.HBox(False, 0)
self.revision_container.pack_start(label, False, False, 0)
self.revision_container.pack_start(self.revision_label, False, False, 0)
vbox.pack_start(self.revision_container, False, False, 0)
# Set up the Log Message line
label = gtk.Label(_("Message:"))
label.set_size_request(90, -1)
label.set_properties(xalign=0,yalign=0)
self.message_label = gtk.Label("")
self.message_label.set_properties(xalign=0,yalign=0,selectable=True)
self.message_label.set_line_wrap(True)
self.message_label.set_size_request(250, -1)
self.message_container = gtk.HBox(False, 0)
self.message_container.pack_start(label, False, False, 0)
self.message_container.pack_start(self.message_label, False, False, 0)
vbox.pack_start(self.message_container, False, False, 0)
self.add_containers = [self.branch_name_container, self.track_container,
self.save_container, self.start_point_container,
self.checkout_container]
self.view_containers = [self.branch_name_container, self.revision_container,
self.message_container, self.save_container, self.checkout_container]
self.all_containers = [self.branch_name_container, self.track_container,
self.revision_container, self.message_container, self.save_container,
self.start_point_container, self.checkout_container]
vbox.show()
self.detail_container.add(vbox)
def load(self):
self.items_treeview.clear()
self.branch_list = self.git.branch_list()
for item in self.branch_list:
name = saxutils.escape(item.name)
if item.tracking:
name = "<b>%s</b>" % name
self.items_treeview.append([name])
def on_add_clicked(self, widget):
self.show_add()
def on_delete_clicked(self, widget):
items = self.items_treeview.get_selected_row_items(0)
selected = []
for branch in items:
selected.append(saxutils.unescape(branch).replace("<b>", "").replace("</b>", ""))
confirm = rabbitvcs.ui.dialog.Confirmation(_("Are you sure you want to delete %s?" % ", ".join(selected)))
result = confirm.run()
if result == gtk.RESPONSE_OK or result == True:
for branch in selected:
self.git.branch_delete(branch)
self.load()
self.show_add()
def on_save_clicked(self, widget):
if self.state == STATE_ADD:
branch_name = self.branch_entry.get_text()
branch_track = self.track_checkbox.get_active()
start_point = self.git.revision(self.start_point_entry.get_text())
self.git.branch(branch_name, revision=start_point)
elif self.state == STATE_EDIT:
branch_name = self.branch_entry.get_text()
branch_track = self.track_checkbox.get_active()
if self.selected_branch.name != branch_name:
self.git.branch_rename(self.selected_branch.name, branch_name)
if self.checkout_checkbox.get_active():
self.git.checkout([], self.git.revision(branch_name))
self.load()
self.show_edit(branch_name)
def on_treeview_key_event(self, treeview, data=None):
if gtk.gdk.keyval_name(data.keyval) in ("Up", "Down", "Return"):
self.on_treeview_event(treeview, data)
def on_treeview_mouse_event(self, treeview, data=None):
self.on_treeview_event(treeview, data)
def on_treeview_event(self, treeview, data):
selected = self.items_treeview.get_selected_row_items(0)
if len(selected) > 0:
if len(selected) == 1:
branch_name = selected[0]
if branch_name.startswith("<b>"):
branch_name = branch_name[3:-4]
self.show_edit(branch_name)
self.get_widget("delete").set_sensitive(True)
else:
self.show_add()
def show_containers(self, containers):
for container in self.all_containers:
container.hide()
for container in containers:
container.show_all()
def show_add(self):
self.state = STATE_ADD
revision = "HEAD"
if self.revision:
active_branch = self.git.get_active_branch()
if active_branch:
revision = six.text_type(active_branch.name)
self.items_treeview.unselect_all()
self.branch_entry.set_text("")
self.save_button.set_label(_("Add"))
self.start_point_entry.set_text(revision)
self.track_checkbox.set_active(True)
self.checkout_checkbox.set_sensitive(True)
self.checkout_checkbox.set_active(False)
self.show_containers(self.add_containers)
self.get_widget("detail_label").set_markup(_("<b>Add Branch</b>"))
def show_edit(self, branch_name):
self.state = STATE_EDIT
branch_name = saxutils.unescape(branch_name)
self.selected_branch = None
for item in self.branch_list:
if item.name == branch_name:
self.selected_branch = item
break
self.save_button.set_label(_("Save"))
if self.selected_branch:
self.branch_entry.set_text(self.selected_branch.name)
self.revision_label.set_text(six.text_type(self.selected_branch.revision))
self.message_label.set_text(self.selected_branch.message.rstrip("\n"))
if self.selected_branch.tracking:
self.checkout_checkbox.set_active(True)
self.checkout_checkbox.set_sensitive(False)
else:
self.checkout_checkbox.set_active(False)
self.checkout_checkbox.set_sensitive(True)
self.show_containers(self.view_containers)
self.get_widget("detail_label").set_markup(_("<b>Branch Detail</b>"))
def on_log_dialog_button_clicked(self, widget):
log_dialog_factory(
self.path,
ok_callback=self.on_log_dialog_closed
)
def on_log_dialog_closed(self, data):
if data:
self.start_point_entry.set_text(data)
if __name__ == "__main__":
from rabbitvcs.ui import main, REVISION_OPT, VCS_OPT
(options, paths) = main(
[REVISION_OPT, VCS_OPT],
usage="Usage: rabbitvcs branch-manager path [-r revision]"
)
window = GitBranchManager(paths[0], revision=options.revision)
window.register_gtk_quit()
gtk.main()
| from __future__ import absolute_import
#
# This is an extension to the Nautilus file manager to allow better
# integration with the Subversion source control system.
#
# Copyright (C) 2006-2008 by <NAME> <<EMAIL>>
# Copyright (C) 2007-2008 by <NAME> <<EMAIL>>
# Copyright (C) 2008-2010 by <NAME> <<EMAIL>>
#
# RabbitVCS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RabbitVCS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RabbitVCS; If not, see <http://www.gnu.org/licenses/>.
#
import os
import pygtk
import gobject
import gtk
import pango
from datetime import datetime
import time
from rabbitvcs.ui import InterfaceView
from rabbitvcs.ui.action import GitAction
from rabbitvcs.ui.log import log_dialog_factory
import rabbitvcs.ui.widget
from rabbitvcs.ui.dialog import DeleteConfirmation
import rabbitvcs.util.helper
import rabbitvcs.vcs
from xml.sax import saxutils
from rabbitvcs import gettext
import six
_ = gettext.gettext
STATE_ADD = 0
STATE_EDIT = 1
class GitBranchManager(InterfaceView):
"""
Provides a UI interface to manage items
"""
state = STATE_ADD
def __init__(self, path, revision=""):
InterfaceView.__init__(self, "manager", "Manager")
self.path = path
self.get_widget("right_side").show()
self.get_widget("Manager").set_size_request(695, -1)
self.get_widget("Manager").set_title(_("Branch Manager"))
self.get_widget("items_label").set_markup(_("<b>Branches</b>"))
self.vcs = rabbitvcs.vcs.VCS()
self.git = self.vcs.git(path)
self.revision = self.git.revision(revision)
self.selected_branch = None
self.items_treeview = rabbitvcs.ui.widget.Table(
self.get_widget("items_treeview"),
[rabbitvcs.ui.widget.TYPE_MARKUP],
[_("Branch")],
callbacks={
"mouse-event": self.on_treeview_mouse_event,
"key-event": self.on_treeview_key_event
}
)
self.initialize_detail()
self.load()
if self.revision:
revision_branches = self.git.branch_list(self.revision)
if revision_branches:
self.show_edit(revision_branches[0].name)
else:
self.show_add()
else:
self.show_add()
def initialize_detail(self):
self.detail_container = self.get_widget("detail_container")
vbox = gtk.VBox(False, 6)
# Set up the Branch line
label = gtk.Label(_("Name:"))
label.set_size_request(90, -1)
label.set_properties(xalign=0,yalign=.5)
self.branch_entry = gtk.Entry()
self.branch_name_container = gtk.HBox(False, 0)
self.branch_name_container.pack_start(label, False, False, 0)
self.branch_name_container.pack_start(self.branch_entry, False, False, 0)
vbox.pack_start(self.branch_name_container, False, False, 0)
# Set up the Commit-sha line
label = gtk.Label(_("Start Point:"))
label.set_size_request(90, -1)
label.set_properties(xalign=0,yalign=.5)
self.start_point_entry = gtk.Entry()
self.start_point_entry.set_size_request(300, -1)
self.start_point_container = gtk.HBox(False, 0)
self.log_dialog_button = gtk.Button()
self.log_dialog_button.connect("clicked", self.on_log_dialog_button_clicked)
image = gtk.Image()
image.set_from_icon_name("rabbitvcs-show_log", 2)
self.log_dialog_button.set_image(image)
self.start_point_container.pack_start(label, False, False, 0)
self.start_point_container.pack_start(self.start_point_entry, False, False, 0)
self.start_point_container.pack_start(self.log_dialog_button, False, False, 0)
vbox.pack_start(self.start_point_container, False, False, 0)
# Set up the Track line
label = gtk.Label("")
label.set_size_request(90, -1)
self.track_checkbox = gtk.CheckButton(_("Keep old branch's history"))
self.track_container = gtk.HBox(False, 0)
self.track_container.pack_start(label, False, False, 0)
self.track_container.pack_start(self.track_checkbox, False, False, 0)
vbox.pack_start(self.track_container, False, False, 0)
# Set up the checkout line
label = gtk.Label("")
label.set_size_request(90, -1)
self.checkout_checkbox = gtk.CheckButton(_("Set as active branch"))
self.checkout_container = gtk.HBox(False, 0)
self.checkout_container.pack_start(label, False, False, 0)
self.checkout_container.pack_start(self.checkout_checkbox, False, False, 0)
vbox.pack_start(self.checkout_container, False, False, 0)
# Set up Save button
label = gtk.Label("")
label.set_size_request(90, -1)
self.save_button = gtk.Button(label=_("Save"))
self.save_button.connect("clicked", self.on_save_clicked)
self.save_container = gtk.HBox(False, 0)
self.save_container.pack_start(label, False, False, 0)
self.save_container.pack_start(self.save_button, False, False, 0)
vbox.pack_start(self.save_container, False, False, 0)
# Set up the Revision line
label = gtk.Label(_("Revision:"))
label.set_size_request(90, -1)
label.set_properties(xalign=0,yalign=0)
self.revision_label = gtk.Label("")
self.revision_label.set_properties(xalign=0,selectable=True)
self.revision_label.set_line_wrap(True)
self.revision_container = gtk.HBox(False, 0)
self.revision_container.pack_start(label, False, False, 0)
self.revision_container.pack_start(self.revision_label, False, False, 0)
vbox.pack_start(self.revision_container, False, False, 0)
# Set up the Log Message line
label = gtk.Label(_("Message:"))
label.set_size_request(90, -1)
label.set_properties(xalign=0,yalign=0)
self.message_label = gtk.Label("")
self.message_label.set_properties(xalign=0,yalign=0,selectable=True)
self.message_label.set_line_wrap(True)
self.message_label.set_size_request(250, -1)
self.message_container = gtk.HBox(False, 0)
self.message_container.pack_start(label, False, False, 0)
self.message_container.pack_start(self.message_label, False, False, 0)
vbox.pack_start(self.message_container, False, False, 0)
self.add_containers = [self.branch_name_container, self.track_container,
self.save_container, self.start_point_container,
self.checkout_container]
self.view_containers = [self.branch_name_container, self.revision_container,
self.message_container, self.save_container, self.checkout_container]
self.all_containers = [self.branch_name_container, self.track_container,
self.revision_container, self.message_container, self.save_container,
self.start_point_container, self.checkout_container]
vbox.show()
self.detail_container.add(vbox)
def load(self):
self.items_treeview.clear()
self.branch_list = self.git.branch_list()
for item in self.branch_list:
name = saxutils.escape(item.name)
if item.tracking:
name = "<b>%s</b>" % name
self.items_treeview.append([name])
def on_add_clicked(self, widget):
self.show_add()
def on_delete_clicked(self, widget):
items = self.items_treeview.get_selected_row_items(0)
selected = []
for branch in items:
selected.append(saxutils.unescape(branch).replace("<b>", "").replace("</b>", ""))
confirm = rabbitvcs.ui.dialog.Confirmation(_("Are you sure you want to delete %s?" % ", ".join(selected)))
result = confirm.run()
if result == gtk.RESPONSE_OK or result == True:
for branch in selected:
self.git.branch_delete(branch)
self.load()
self.show_add()
def on_save_clicked(self, widget):
if self.state == STATE_ADD:
branch_name = self.branch_entry.get_text()
branch_track = self.track_checkbox.get_active()
start_point = self.git.revision(self.start_point_entry.get_text())
self.git.branch(branch_name, revision=start_point)
elif self.state == STATE_EDIT:
branch_name = self.branch_entry.get_text()
branch_track = self.track_checkbox.get_active()
if self.selected_branch.name != branch_name:
self.git.branch_rename(self.selected_branch.name, branch_name)
if self.checkout_checkbox.get_active():
self.git.checkout([], self.git.revision(branch_name))
self.load()
self.show_edit(branch_name)
def on_treeview_key_event(self, treeview, data=None):
if gtk.gdk.keyval_name(data.keyval) in ("Up", "Down", "Return"):
self.on_treeview_event(treeview, data)
def on_treeview_mouse_event(self, treeview, data=None):
self.on_treeview_event(treeview, data)
def on_treeview_event(self, treeview, data):
selected = self.items_treeview.get_selected_row_items(0)
if len(selected) > 0:
if len(selected) == 1:
branch_name = selected[0]
if branch_name.startswith("<b>"):
branch_name = branch_name[3:-4]
self.show_edit(branch_name)
self.get_widget("delete").set_sensitive(True)
else:
self.show_add()
def show_containers(self, containers):
for container in self.all_containers:
container.hide()
for container in containers:
container.show_all()
def show_add(self):
self.state = STATE_ADD
revision = "HEAD"
if self.revision:
active_branch = self.git.get_active_branch()
if active_branch:
revision = six.text_type(active_branch.name)
self.items_treeview.unselect_all()
self.branch_entry.set_text("")
self.save_button.set_label(_("Add"))
self.start_point_entry.set_text(revision)
self.track_checkbox.set_active(True)
self.checkout_checkbox.set_sensitive(True)
self.checkout_checkbox.set_active(False)
self.show_containers(self.add_containers)
self.get_widget("detail_label").set_markup(_("<b>Add Branch</b>"))
def show_edit(self, branch_name):
self.state = STATE_EDIT
branch_name = saxutils.unescape(branch_name)
self.selected_branch = None
for item in self.branch_list:
if item.name == branch_name:
self.selected_branch = item
break
self.save_button.set_label(_("Save"))
if self.selected_branch:
self.branch_entry.set_text(self.selected_branch.name)
self.revision_label.set_text(six.text_type(self.selected_branch.revision))
self.message_label.set_text(self.selected_branch.message.rstrip("\n"))
if self.selected_branch.tracking:
self.checkout_checkbox.set_active(True)
self.checkout_checkbox.set_sensitive(False)
else:
self.checkout_checkbox.set_active(False)
self.checkout_checkbox.set_sensitive(True)
self.show_containers(self.view_containers)
self.get_widget("detail_label").set_markup(_("<b>Branch Detail</b>"))
def on_log_dialog_button_clicked(self, widget):
log_dialog_factory(
self.path,
ok_callback=self.on_log_dialog_closed
)
def on_log_dialog_closed(self, data):
if data:
self.start_point_entry.set_text(data)
if __name__ == "__main__":
from rabbitvcs.ui import main, REVISION_OPT, VCS_OPT
(options, paths) = main(
[REVISION_OPT, VCS_OPT],
usage="Usage: rabbitvcs branch-manager path [-r revision]"
)
window = GitBranchManager(paths[0], revision=options.revision)
window.register_gtk_quit()
gtk.main() | en | 0.831384 | # # This is an extension to the Nautilus file manager to allow better # integration with the Subversion source control system. # # Copyright (C) 2006-2008 by <NAME> <<EMAIL>> # Copyright (C) 2007-2008 by <NAME> <<EMAIL>> # Copyright (C) 2008-2010 by <NAME> <<EMAIL>> # # RabbitVCS is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # RabbitVCS is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with RabbitVCS; If not, see <http://www.gnu.org/licenses/>. # Provides a UI interface to manage items # Set up the Branch line # Set up the Commit-sha line # Set up the Track line # Set up the checkout line # Set up Save button # Set up the Revision line # Set up the Log Message line | 1.582805 | 2 |
src/dataCrawler_Goodinfo.py | Ping6666/Stock-Project | 2 | 6612286 | # version: 0.1
import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
def downloadData(stockID, year):
time.sleep(10)
FilePath_ = "D:/Programming/StockProject/code/src/chromedriver.exe"
urlOriginal_ = "https://goodinfo.tw/StockInfo/ShowK_Chart.asp?STOCK_ID=" + str(
stockID) + "&CHT_CAT2=DATE"
# Options
options = Options()
options.add_argument("--disable-notifications")
options.add_argument('--ignore-certificate-errors')
options.add_argument('--ignore-ssl-errors')
options.add_argument('--incognito')
userAgent = "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0"
options.add_argument("user-agent={}".format(userAgent))
options.add_experimental_option(
"prefs", {
"download.default_directory":
r"D:\Programming\StockProject\code\rawData",
"download.prompt_for_download": False,
"download.directory_upgrade": True,
"safebrowsing.enabled": True
})
# webdriver.Chrome
new_ = webdriver.Chrome(FilePath_, options=options)
new_.minimize_window()
new_.get(urlOriginal_)
try:
new_.execute_script(
"ReloadReport('ShowK_Chart.asp?STOCK_ID=" + str(stockID) +
"&CHT_CAT2=DATE&STEP=DATA&PERIOD='" +
"+encodeURIComponent(365),divK_ChartDetail,txtK_ChartDetailLoading);"
)
except KeyboardInterrupt:
exit(3)
except:
print("When process stock no. " + str(stockID) +
" BAD thing was happened.")
return
# check data has been loaded
tmp = True
while tmp:
try:
element = new_.find_element_by_id('txtK_ChartDetailLoading')
tmp = element.is_displayed()
except:
tmp = False
break
# download data
new_.execute_script(
"export2html(divPriceDetail.innerHTML,'K_Chart.html');")
tmp = True
while tmp:
tmp = not os.path.exists("../rawData/K_Chart.html")
newName = '../rawData/K_Chart-' + str(stockID) + '-' + str(year) + '.html'
os.rename('../rawData/K_Chart.html', newName)
# close window
new_.close()
return
def listProcess(newList):
for new_ in newList:
try:
downloadData(str(new_), '2021')
except KeyboardInterrupt:
exit(3)
except:
print("Some Error occur when processing stock no. " + str(new_) +
".")
return
def readFromFile(fileName):
tmpList_ = []
f = open(fileName, 'r', encoding='utf-8')
tmpList = f.readlines()
for tmp in tmpList:
tmp_ = tmp.replace('\n', '')
tmpList_.append(tmp_)
return tmpList_
currentList = readFromFile('../stockNumber/stock.txt')
# print(currentList)
listProcess(currentList)
| # version: 0.1
import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
def downloadData(stockID, year):
time.sleep(10)
FilePath_ = "D:/Programming/StockProject/code/src/chromedriver.exe"
urlOriginal_ = "https://goodinfo.tw/StockInfo/ShowK_Chart.asp?STOCK_ID=" + str(
stockID) + "&CHT_CAT2=DATE"
# Options
options = Options()
options.add_argument("--disable-notifications")
options.add_argument('--ignore-certificate-errors')
options.add_argument('--ignore-ssl-errors')
options.add_argument('--incognito')
userAgent = "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0"
options.add_argument("user-agent={}".format(userAgent))
options.add_experimental_option(
"prefs", {
"download.default_directory":
r"D:\Programming\StockProject\code\rawData",
"download.prompt_for_download": False,
"download.directory_upgrade": True,
"safebrowsing.enabled": True
})
# webdriver.Chrome
new_ = webdriver.Chrome(FilePath_, options=options)
new_.minimize_window()
new_.get(urlOriginal_)
try:
new_.execute_script(
"ReloadReport('ShowK_Chart.asp?STOCK_ID=" + str(stockID) +
"&CHT_CAT2=DATE&STEP=DATA&PERIOD='" +
"+encodeURIComponent(365),divK_ChartDetail,txtK_ChartDetailLoading);"
)
except KeyboardInterrupt:
exit(3)
except:
print("When process stock no. " + str(stockID) +
" BAD thing was happened.")
return
# check data has been loaded
tmp = True
while tmp:
try:
element = new_.find_element_by_id('txtK_ChartDetailLoading')
tmp = element.is_displayed()
except:
tmp = False
break
# download data
new_.execute_script(
"export2html(divPriceDetail.innerHTML,'K_Chart.html');")
tmp = True
while tmp:
tmp = not os.path.exists("../rawData/K_Chart.html")
newName = '../rawData/K_Chart-' + str(stockID) + '-' + str(year) + '.html'
os.rename('../rawData/K_Chart.html', newName)
# close window
new_.close()
return
def listProcess(newList):
for new_ in newList:
try:
downloadData(str(new_), '2021')
except KeyboardInterrupt:
exit(3)
except:
print("Some Error occur when processing stock no. " + str(new_) +
".")
return
def readFromFile(fileName):
tmpList_ = []
f = open(fileName, 'r', encoding='utf-8')
tmpList = f.readlines()
for tmp in tmpList:
tmp_ = tmp.replace('\n', '')
tmpList_.append(tmp_)
return tmpList_
currentList = readFromFile('../stockNumber/stock.txt')
# print(currentList)
listProcess(currentList)
| en | 0.79594 | # version: 0.1 # Options # webdriver.Chrome # check data has been loaded # download data # close window # print(currentList) | 2.941746 | 3 |
hoover/search/middleware.py | liquidinvestigations/hoover-search | 1 | 6612287 | <reponame>liquidinvestigations/hoover-search<filename>hoover/search/middleware.py
from django.contrib.auth.models import User
from django.utils.cache import add_never_cache_headers
from django.utils.deprecation import MiddlewareMixin
from django.conf import settings
from django.contrib.auth.middleware import RemoteUserMiddleware
from hoover.search.models import Profile
class NoReferral(MiddlewareMixin):
def process_response(self, request, response):
response['X-Content-Type-Options'] = 'nosniff'
return response
class NoCache(MiddlewareMixin):
def process_response(self, request, response):
if 'Cache-Control' not in response:
add_never_cache_headers(response)
return response
class AuthproxyUserMiddleware(RemoteUserMiddleware):
header = 'HTTP_X_FORWARDED_USER'
def process_request(self, request):
if not settings.HOOVER_AUTHPROXY:
return
super().process_request(request)
if not request.META.get(self.header):
return
user = request.user
username = request.META.get(self.header)
assert user.username == username
email = request.META.get('HTTP_X_FORWARDED_EMAIL')
full_name = request.META.get('HTTP_X_FORWARDED_PREFERRED_USERNAME')
groups = [
x.strip()
for x in request.META.get('HTTP_X_FORWARDED_GROUPS').split(',')
]
is_admin = ('admin' in groups)
save = False
if not User.objects.filter(username=username).exists():
save = True
if is_admin != user.is_superuser or is_admin != user.is_staff:
user.is_superuser = is_admin
user.is_staff = is_admin
save = True
if email != user.email:
user.email = email
save = True
if full_name != user.get_full_name() and \
full_name != user.get_username() and \
' ' in full_name:
user.first_name, user.last_name = full_name.split(' ', maxsplit=1)
save = True
if save:
user.set_unusable_password()
user.save()
Profile.objects.get_or_create(user=user)
| from django.contrib.auth.models import User
from django.utils.cache import add_never_cache_headers
from django.utils.deprecation import MiddlewareMixin
from django.conf import settings
from django.contrib.auth.middleware import RemoteUserMiddleware
from hoover.search.models import Profile
class NoReferral(MiddlewareMixin):
def process_response(self, request, response):
response['X-Content-Type-Options'] = 'nosniff'
return response
class NoCache(MiddlewareMixin):
def process_response(self, request, response):
if 'Cache-Control' not in response:
add_never_cache_headers(response)
return response
class AuthproxyUserMiddleware(RemoteUserMiddleware):
header = 'HTTP_X_FORWARDED_USER'
def process_request(self, request):
if not settings.HOOVER_AUTHPROXY:
return
super().process_request(request)
if not request.META.get(self.header):
return
user = request.user
username = request.META.get(self.header)
assert user.username == username
email = request.META.get('HTTP_X_FORWARDED_EMAIL')
full_name = request.META.get('HTTP_X_FORWARDED_PREFERRED_USERNAME')
groups = [
x.strip()
for x in request.META.get('HTTP_X_FORWARDED_GROUPS').split(',')
]
is_admin = ('admin' in groups)
save = False
if not User.objects.filter(username=username).exists():
save = True
if is_admin != user.is_superuser or is_admin != user.is_staff:
user.is_superuser = is_admin
user.is_staff = is_admin
save = True
if email != user.email:
user.email = email
save = True
if full_name != user.get_full_name() and \
full_name != user.get_username() and \
' ' in full_name:
user.first_name, user.last_name = full_name.split(' ', maxsplit=1)
save = True
if save:
user.set_unusable_password()
user.save()
Profile.objects.get_or_create(user=user) | none | 1 | 1.961199 | 2 | |
backend/medicar/tests/test_views_specialty.py | WesGtoX/medicar | 2 | 6612288 | from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase, APIClient
from .fixture import SpecialtyFactory
User = get_user_model()
class SpecialtyViewSetTests(APITestCase):
def setUp(self):
self.user = User.objects.create_user(email='<EMAIL>', password='<PASSWORD>')
self.anon_user = User.objects.create_user(email='<EMAIL>', password='<PASSWORD>')
self.unath_client = APIClient()
self.client = APIClient()
token, _ = Token.objects.get_or_create(user=self.user)
self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}')
def test_perform_create(self):
"""register a new specialty"""
data = {'name': 'Cardiologia'}
response = self.unath_client.post(reverse('specialty-list'), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.post(reverse('specialty-list'), data=data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_list(self):
"""listing all specialties"""
SpecialtyFactory.create_batch(5)
response = self.unath_client.get(reverse('specialty-list'))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('specialty-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(len(response.data), 5)
def test_retrieve(self):
"""details of a specific specialty"""
specialty = SpecialtyFactory.create(id=10)
response = self.unath_client.get(reverse('specialty-detail', args=[10]))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('specialty-detail', args=[10]))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['name'], specialty.name)
def test_update(self):
"""methods not allowed returned error 405"""
specialty = SpecialtyFactory.create(id=21)
data = {'name': 'Joe'}
self.assertNotEqual(specialty.name, data['name'])
response = self.unath_client.put(reverse('specialty-detail', args=[21]), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.put(reverse('specialty-detail', args=[21]), data=data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_partial_update(self):
"""methods not allowed returned error 405"""
specialty = SpecialtyFactory.create(id=22)
data = {'name': 'Joe'}
self.assertNotEqual(specialty.name, data['name'])
response = self.unath_client.patch(reverse('specialty-detail', args=[22]), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.patch(reverse('specialty-detail', args=[22]), data=data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_destroy(self):
"""methods not allowed returned error 405"""
SpecialtyFactory.create(id=15)
response = self.unath_client.get(reverse('specialty-detail', args=[15]))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('specialty-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(len(response.data), 1)
response = self.client.delete(reverse('specialty-detail', args=[15]))
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
response = self.client.get(reverse('specialty-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
| from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.test import APITestCase, APIClient
from .fixture import SpecialtyFactory
User = get_user_model()
class SpecialtyViewSetTests(APITestCase):
def setUp(self):
self.user = User.objects.create_user(email='<EMAIL>', password='<PASSWORD>')
self.anon_user = User.objects.create_user(email='<EMAIL>', password='<PASSWORD>')
self.unath_client = APIClient()
self.client = APIClient()
token, _ = Token.objects.get_or_create(user=self.user)
self.client.credentials(HTTP_AUTHORIZATION=f'Token {token.key}')
def test_perform_create(self):
"""register a new specialty"""
data = {'name': 'Cardiologia'}
response = self.unath_client.post(reverse('specialty-list'), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.post(reverse('specialty-list'), data=data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_list(self):
"""listing all specialties"""
SpecialtyFactory.create_batch(5)
response = self.unath_client.get(reverse('specialty-list'))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('specialty-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(len(response.data), 5)
def test_retrieve(self):
"""details of a specific specialty"""
specialty = SpecialtyFactory.create(id=10)
response = self.unath_client.get(reverse('specialty-detail', args=[10]))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('specialty-detail', args=[10]))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['name'], specialty.name)
def test_update(self):
"""methods not allowed returned error 405"""
specialty = SpecialtyFactory.create(id=21)
data = {'name': 'Joe'}
self.assertNotEqual(specialty.name, data['name'])
response = self.unath_client.put(reverse('specialty-detail', args=[21]), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.put(reverse('specialty-detail', args=[21]), data=data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_partial_update(self):
"""methods not allowed returned error 405"""
specialty = SpecialtyFactory.create(id=22)
data = {'name': 'Joe'}
self.assertNotEqual(specialty.name, data['name'])
response = self.unath_client.patch(reverse('specialty-detail', args=[22]), data=data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.patch(reverse('specialty-detail', args=[22]), data=data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_destroy(self):
"""methods not allowed returned error 405"""
SpecialtyFactory.create(id=15)
response = self.unath_client.get(reverse('specialty-detail', args=[15]))
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self.client.get(reverse('specialty-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(len(response.data), 1)
response = self.client.delete(reverse('specialty-detail', args=[15]))
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
response = self.client.get(reverse('specialty-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 1)
| en | 0.76372 | register a new specialty listing all specialties details of a specific specialty methods not allowed returned error 405 methods not allowed returned error 405 methods not allowed returned error 405 | 2.488662 | 2 |
ExerciciosPythonMundo1/63. Multa de Velocidade.py | juniorppb/arquivos-python | 0 | 6612289 | <filename>ExerciciosPythonMundo1/63. Multa de Velocidade.py
radar = float(input('Qual foi a velocidade que você passou pelo radar? '))
multa = (radar - 80) * 7.00
if radar > 80:
print('Você excedeu a velocidade permitida.')
print('Sua multa será de R$ {:.2f}.'.format(multa))
else:
print('Você estava dentro da velocidade permitida.')
| <filename>ExerciciosPythonMundo1/63. Multa de Velocidade.py
radar = float(input('Qual foi a velocidade que você passou pelo radar? '))
multa = (radar - 80) * 7.00
if radar > 80:
print('Você excedeu a velocidade permitida.')
print('Sua multa será de R$ {:.2f}.'.format(multa))
else:
print('Você estava dentro da velocidade permitida.')
| none | 1 | 3.862461 | 4 | |
detectron2/modeling/backbone/pafpn.py | sm047/detectron2 | 5 | 6612290 | <reponame>sm047/detectron2<gh_stars>1-10
#!/usr/bin/env python3
# @Time : 18/5/20 11:41 AM
# @Author : fangcheng.ji
# @FileName: pafpn.py
import torch.nn.functional as F
import math
import fvcore.nn.weight_init as weight_init
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from .fpn import FPN, LastLevelMaxPool, LastLevelP6P7, LastLevelP6
from .build import BACKBONE_REGISTRY
from .resnet import build_resnet_backbone
class PAFPN(FPN):
def __init__(self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum"):
super().__init__(bottom_up, in_features, out_channels, norm, top_block, fuse_type)
self.norm = norm
up_convs = []
fuse_convs = []
use_bias = norm == ""
for idx in range(len(self._out_features) - 1):
up_norm = get_norm(norm, out_channels)
fuse_norm = get_norm(norm, out_channels)
up_conv = Conv2d(
out_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=use_bias, norm=up_norm
)
fuse_conv = Conv2d(
out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=use_bias, norm=fuse_norm
)
weight_init.c2_xavier_fill(up_conv)
weight_init.c2_xavier_fill(fuse_conv)
stage = int(math.log2(self._out_feature_strides[self._out_features[idx]]))
self.add_module("pafpn_up{}".format(stage), up_conv)
self.add_module("pafpn_fuse{}".format(stage), fuse_conv)
up_convs.append(up_conv)
fuse_convs.append(fuse_conv)
self.up_convs = up_convs
self.fuse_convs = fuse_convs
self._size_divisibility = max([v for _, v in self._out_feature_strides.items()])
def forward(self, x):
# fpn forward
x = super().forward(x)
# pa fpn
x = [x[f] for f in self._out_features]
results = [x[0]]
for idx, (up_conv, fuse_conv) in enumerate(zip(self.up_convs, self.fuse_convs)):
up_feature = up_conv(x[idx])
if self.norm is not "":
up_feature = F.relu_(up_feature)
fuse_feature = fuse_conv(up_feature + x[idx + 1])
if self.norm is not "":
fuse_feature = F.relu_(fuse_feature)
results.append(fuse_feature)
assert len(self._out_features) == len(results)
return dict(zip(self._out_features, results))
@BACKBONE_REGISTRY.register()
def build_resnet_pafpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = PAFPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
@BACKBONE_REGISTRY.register()
def build_fcos_resnet_pafpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
top_levels = cfg.MODEL.FCOS.TOP_LEVELS
in_channels_top = out_channels
if top_levels == 2:
top_block = LastLevelP6P7(in_channels_top, out_channels, "p5")
if top_levels == 1:
top_block = LastLevelP6(in_channels_top, out_channels, "p5")
elif top_levels == 0:
top_block = None
backbone = PAFPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=top_block,
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone | #!/usr/bin/env python3
# @Time : 18/5/20 11:41 AM
# @Author : fangcheng.ji
# @FileName: pafpn.py
import torch.nn.functional as F
import math
import fvcore.nn.weight_init as weight_init
from detectron2.layers import Conv2d, ShapeSpec, get_norm
from .fpn import FPN, LastLevelMaxPool, LastLevelP6P7, LastLevelP6
from .build import BACKBONE_REGISTRY
from .resnet import build_resnet_backbone
class PAFPN(FPN):
def __init__(self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum"):
super().__init__(bottom_up, in_features, out_channels, norm, top_block, fuse_type)
self.norm = norm
up_convs = []
fuse_convs = []
use_bias = norm == ""
for idx in range(len(self._out_features) - 1):
up_norm = get_norm(norm, out_channels)
fuse_norm = get_norm(norm, out_channels)
up_conv = Conv2d(
out_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=use_bias, norm=up_norm
)
fuse_conv = Conv2d(
out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=use_bias, norm=fuse_norm
)
weight_init.c2_xavier_fill(up_conv)
weight_init.c2_xavier_fill(fuse_conv)
stage = int(math.log2(self._out_feature_strides[self._out_features[idx]]))
self.add_module("pafpn_up{}".format(stage), up_conv)
self.add_module("pafpn_fuse{}".format(stage), fuse_conv)
up_convs.append(up_conv)
fuse_convs.append(fuse_conv)
self.up_convs = up_convs
self.fuse_convs = fuse_convs
self._size_divisibility = max([v for _, v in self._out_feature_strides.items()])
def forward(self, x):
# fpn forward
x = super().forward(x)
# pa fpn
x = [x[f] for f in self._out_features]
results = [x[0]]
for idx, (up_conv, fuse_conv) in enumerate(zip(self.up_convs, self.fuse_convs)):
up_feature = up_conv(x[idx])
if self.norm is not "":
up_feature = F.relu_(up_feature)
fuse_feature = fuse_conv(up_feature + x[idx + 1])
if self.norm is not "":
fuse_feature = F.relu_(fuse_feature)
results.append(fuse_feature)
assert len(self._out_features) == len(results)
return dict(zip(self._out_features, results))
@BACKBONE_REGISTRY.register()
def build_resnet_pafpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
backbone = PAFPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=LastLevelMaxPool(),
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone
@BACKBONE_REGISTRY.register()
def build_fcos_resnet_pafpn_backbone(cfg, input_shape: ShapeSpec):
"""
Args:
cfg: a detectron2 CfgNode
Returns:
backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
"""
bottom_up = build_resnet_backbone(cfg, input_shape)
in_features = cfg.MODEL.FPN.IN_FEATURES
out_channels = cfg.MODEL.FPN.OUT_CHANNELS
top_levels = cfg.MODEL.FCOS.TOP_LEVELS
in_channels_top = out_channels
if top_levels == 2:
top_block = LastLevelP6P7(in_channels_top, out_channels, "p5")
if top_levels == 1:
top_block = LastLevelP6(in_channels_top, out_channels, "p5")
elif top_levels == 0:
top_block = None
backbone = PAFPN(
bottom_up=bottom_up,
in_features=in_features,
out_channels=out_channels,
norm=cfg.MODEL.FPN.NORM,
top_block=top_block,
fuse_type=cfg.MODEL.FPN.FUSE_TYPE,
)
return backbone | en | 0.247357 | #!/usr/bin/env python3 # @Time : 18/5/20 11:41 AM # @Author : fangcheng.ji # @FileName: pafpn.py # fpn forward # pa fpn Args: cfg: a detectron2 CfgNode Returns: backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. Args: cfg: a detectron2 CfgNode Returns: backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. | 1.766229 | 2 |
tests/meltano/cli/test_state.py | Mu-L/meltano | 0 | 6612291 | <gh_stars>0
import json
import os
import sys
from unittest import mock
import pytest
from asserts import assert_cli_runner
from meltano.cli import cli, state
from meltano.core.utils import merge
# Job IDs that do NOT follow the "<environment>:<tap>-to-<target>" naming
# convention; state.state_service_from_job_id is expected to return None
# for each of these.
unconventional_job_ids = [
    "unconventional",
    "dev:tap-and-target",
    "tap-mock-to-target-mock",
    "staging:",
    "staging:tap-mock-to-",
    "dev:-to-target-mock",
]
# Job IDs that DO follow the convention; these should be parsed into
# (tap, target) block arguments.
conventional_job_ids = ["dev:tap-mock-to-target-mock", "staging:mock-to-mock"]
class TestCliState:
    """Tests for the ``meltano state`` CLI sub-commands (list/get/set/merge/clear)."""

    @pytest.mark.parametrize("job_id", unconventional_job_ids)
    def test_state_service_from_job_id_returns_none_non_convention(  # noqa: WPS118
        self, project, job_id
    ):
        """Job IDs not matching '<env>:<tap>-to-<target>' yield no state service."""
        assert state.state_service_from_job_id(project, job_id) is None

    @pytest.mark.parametrize("job_id", conventional_job_ids)
    def test_state_service_from_job_id_returns_state_service_convention(  # noqa: WPS118
        self, project, job_id
    ):
        """Conventional job IDs are split into (tap, target) and handed to BlockParser."""
        with mock.patch(
            "meltano.cli.state.BlockParser",
            autospec=True,
        ) as mock_block_parser:
            state.state_service_from_job_id(project, job_id)
            # "<env>:<tap>-to-<target>" -> ["<tap>", "<target>"]
            args = job_id.split(":")[1].split("-to-")
            if sys.version_info >= (3, 8):
                assert args in mock_block_parser.call_args.args
            else:
                # ``call_args.args`` only exists on Python >= 3.8; fall back
                # to positional indexing on older interpreters.
                assert args in mock_block_parser.call_args[0]

    @staticmethod
    def get_result_set(result):
        """Return the set of non-empty stdout lines from a CLI *result*."""
        result_set = set(result.stdout.split("\n"))
        # ``discard`` (unlike ``remove``) is a no-op when no empty segment is
        # present, so output without a trailing newline cannot raise KeyError.
        result_set.discard("")
        return result_set

    @staticmethod
    def _consecutive_pairs(job_ids):
        """Pair job IDs two-by-two: [a, b, c, d] -> [(a, b), (c, d)]."""
        return [
            (job_ids[idx], job_ids[idx + 1])
            for idx in range(0, len(job_ids) - 1, 2)
        ]

    def test_list(self, project, job_ids, state_service, cli_runner):
        """``meltano state list`` prints every known job ID, one per line."""
        with mock.patch("meltano.cli.state.StateService", return_value=state_service):
            result = cli_runner.invoke(cli, ["state", "list"])
            assert_cli_runner(result)
            assert self.get_result_set(result) == set(job_ids)

    @pytest.fixture
    def patterns_with_expected_results(self, job_ids):
        """Glob patterns paired with the subset of *job_ids* each should match."""
        return [
            (
                "test:*",
                set(filter(lambda job_id: job_id.startswith("test:"), list(job_ids))),
            ),
            ("*-to-*", set(job_ids)),
            # No wildcard and no exact match: expect an empty result set.
            ("multiple-complete", set()),
            (
                "*multiple-complete",
                set(
                    filter(
                        lambda job_id: job_id.endswith("multiple-complete"),
                        list(job_ids),
                    )
                ),
            ),
        ]

    def test_list_pattern(
        self, state_service, cli_runner, patterns_with_expected_results
    ):
        """``meltano state list <pattern>`` filters job IDs by glob pattern."""
        with mock.patch("meltano.cli.state.StateService", return_value=state_service):
            for pattern, expected_result in patterns_with_expected_results:
                result = cli_runner.invoke(cli, ["state", "list", pattern])
                assert_cli_runner(result)
                assert self.get_result_set(result) == expected_result

    def test_set_from_string(self, state_service, job_ids, payloads, cli_runner):
        """``meltano state set <job_id> <json>`` stores the payload verbatim."""
        with mock.patch("meltano.cli.state.StateService", return_value=state_service):
            for job_id in job_ids:
                for state_payload in payloads.mock_state_payloads:
                    result = cli_runner.invoke(
                        cli,
                        [
                            "state",
                            "set",
                            "--force",
                            job_id,
                            json.dumps(state_payload),
                        ],
                    )
                    assert_cli_runner(result)
                    assert state_service.get_state(job_id) == state_payload

    def test_set_from_file(self, mkdtemp, state_service, job_ids, payloads, cli_runner):
        """``meltano state set --input-file`` reads the payload from a JSON file."""
        tmp_path = mkdtemp()
        with mock.patch("meltano.cli.state.StateService", return_value=state_service):
            for idx_i, job_id in enumerate(job_ids):
                for idx_j, state_payload in enumerate(payloads.mock_state_payloads):
                    # One uniquely named file per (job, payload) combination.
                    filepath = os.path.join(
                        tmp_path, f"state-file-{idx_i}-{idx_j}.json"
                    )
                    with open(filepath, "w+") as state_file:
                        json.dump(state_payload, state_file)
                    result = cli_runner.invoke(
                        cli,
                        ["state", "set", "--force", job_id, "--input-file", filepath],
                    )
                    assert_cli_runner(result)
                    assert state_service.get_state(job_id) == state_payload

    def test_merge_from_string(self, state_service, job_ids, cli_runner):
        """``meltano state merge <job_id> <json>`` merges the payload into existing state."""
        with mock.patch("meltano.cli.state.StateService", return_value=state_service):
            for job_src, job_dst in self._consecutive_pairs(job_ids):
                # Capture both states before merging so the expectation below
                # is computed from the pre-merge values.
                job_src_state = state_service.get_state(job_src)
                job_dst_state = state_service.get_state(job_dst)
                result = cli_runner.invoke(
                    cli,
                    [
                        "state",
                        "merge",
                        job_dst,
                        json.dumps(job_src_state),
                    ],
                )
                assert_cli_runner(result)
                assert state_service.get_state(job_dst) == merge(
                    job_src_state, job_dst_state
                )

    def test_merge_from_file(
        self, mkdtemp, state_service, job_ids, payloads, cli_runner
    ):
        """``meltano state merge --input-file`` merges a JSON file into existing state."""
        tmp_path = mkdtemp()
        with mock.patch("meltano.cli.state.StateService", return_value=state_service):
            for job_src, job_dst in self._consecutive_pairs(job_ids):
                job_src_state = state_service.get_state(job_src)
                job_dst_state = state_service.get_state(job_dst)
                filepath = os.path.join(tmp_path, f"{job_src}-{job_dst}")
                with open(filepath, "w+") as state_file:
                    json.dump(job_src_state, state_file)
                result = cli_runner.invoke(
                    cli,
                    ["state", "merge", job_dst, "--input-file", filepath],
                )
                assert_cli_runner(result)
                assert state_service.get_state(job_dst) == merge(
                    job_src_state, job_dst_state
                )

    def test_merge_from_job(self, state_service, job_ids, cli_runner):
        """``meltano state merge --from-job-id`` merges one job's state into another's."""
        with mock.patch("meltano.cli.state.StateService", return_value=state_service):
            for job_src, job_dst in self._consecutive_pairs(job_ids):
                job_state_src = state_service.get_state(job_src)
                job_state_dst = state_service.get_state(job_dst)
                merged_state = merge(job_state_src, job_state_dst)
                result = cli_runner.invoke(
                    cli, ["state", "merge", "--from-job-id", job_src, job_dst]
                )
                assert_cli_runner(result)
                assert state_service.get_state(job_dst) == merged_state

    def test_get(self, state_service, cli_runner, job_ids_with_expected_states):
        """``meltano state get <job_id>`` prints the job's state as JSON."""
        with mock.patch("meltano.cli.state.StateService", return_value=state_service):
            for job_id, expected_state in job_ids_with_expected_states:
                result = cli_runner.invoke(cli, ["state", "get", job_id])
                assert_cli_runner(result)
                assert json.loads(result.stdout) == expected_state

    def test_clear(self, state_service, cli_runner, job_ids):
        """``meltano state clear`` removes any singer state stored for a job."""
        with mock.patch("meltano.cli.state.StateService", return_value=state_service):
            for job_id in job_ids:
                result = cli_runner.invoke(cli, ["state", "clear", "--force", job_id])
                assert_cli_runner(result)
                job_state = state_service.get_state(job_id)
                # Cleared state may be missing entirely or merely lack the
                # "singer_state" key; both count as cleared.
                assert (not job_state) or (not job_state.get("singer_state"))
| import json
import os
import sys
from unittest import mock
import pytest
from asserts import assert_cli_runner
from meltano.cli import cli, state
from meltano.core.utils import merge
# Job IDs that do NOT follow the "<environment>:<tap>-to-<target>" naming
# convention; state.state_service_from_job_id is expected to return None
# for each of these.
unconventional_job_ids = [
    "unconventional",
    "dev:tap-and-target",
    "tap-mock-to-target-mock",
    "staging:",
    "staging:tap-mock-to-",
    "dev:-to-target-mock",
]
# Job IDs that DO follow the convention; these should be parsed into
# (tap, target) block arguments.
conventional_job_ids = ["dev:tap-mock-to-target-mock", "staging:mock-to-mock"]
class TestCliState:
    @pytest.mark.parametrize("job_id", unconventional_job_ids)
    def test_state_service_from_job_id_returns_none_non_convention(  # noqa: WPS118
        self, project, job_id
    ):
        """Job IDs not matching '<env>:<tap>-to-<target>' yield no state service."""
        assert state.state_service_from_job_id(project, job_id) is None
    @pytest.mark.parametrize("job_id", conventional_job_ids)
    def test_state_service_from_job_id_returns_state_service_convention(  # noqa: WPS118
        self, project, job_id
    ):
        """Conventional job IDs are split into (tap, target) and handed to BlockParser."""
        with mock.patch(
            "meltano.cli.state.BlockParser",
            autospec=True,
        ) as mock_block_parser:
            state.state_service_from_job_id(project, job_id)
            # "<env>:<tap>-to-<target>" -> ["<tap>", "<target>"]
            args = job_id.split(":")[1].split("-to-")
            if sys.version_info >= (3, 8):
                assert args in mock_block_parser.call_args.args
            else:
                # ``call_args.args`` only exists on Python >= 3.8; fall back
                # to positional indexing on older interpreters.
                assert args in mock_block_parser.call_args[0]
@staticmethod
def get_result_set(result):
result_set = set(result.stdout.split("\n"))
result_set.remove("")
return result_set
def test_list(self, project, job_ids, state_service, cli_runner):
with mock.patch("meltano.cli.state.StateService", return_value=state_service):
result = cli_runner.invoke(cli, ["state", "list"])
assert_cli_runner(result)
assert self.get_result_set(result) == set(job_ids)
    @pytest.fixture
    def patterns_with_expected_results(self, job_ids):
        """Glob patterns paired with the subset of *job_ids* each should match."""
        return [
            (
                "test:*",
                set(filter(lambda job_id: job_id.startswith("test:"), list(job_ids))),
            ),
            ("*-to-*", set(job_ids)),
            # No wildcard and no exact match: expect an empty result set.
            ("multiple-complete", set()),
            (
                "*multiple-complete",
                set(
                    filter(
                        lambda job_id: job_id.endswith("multiple-complete"),
                        list(job_ids),
                    )
                ),
            ),
        ]
    def test_list_pattern(
        self, state_service, cli_runner, patterns_with_expected_results
    ):
        """``meltano state list <pattern>`` filters job IDs by glob pattern."""
        with mock.patch("meltano.cli.state.StateService", return_value=state_service):
            for (pattern, expected_result) in patterns_with_expected_results:
                result = cli_runner.invoke(cli, ["state", "list", pattern])
                assert_cli_runner(result)
                assert self.get_result_set(result) == expected_result
def test_set_from_string(self, state_service, job_ids, payloads, cli_runner):
with mock.patch("meltano.cli.state.StateService", return_value=state_service):
for job_id in job_ids:
for state_payload in payloads.mock_state_payloads:
result = cli_runner.invoke(
cli,
[
"state",
"set",
"--force",
job_id,
json.dumps(state_payload),
],
)
assert_cli_runner(result)
assert state_service.get_state(job_id) == state_payload
def test_set_from_file(self, mkdtemp, state_service, job_ids, payloads, cli_runner):
tmp_path = mkdtemp()
with mock.patch("meltano.cli.state.StateService", return_value=state_service):
for idx_i, job_id in enumerate(job_ids):
for idx_j, state_payload in enumerate(payloads.mock_state_payloads):
filepath = os.path.join(
tmp_path, f"state-file-{idx_i}-{idx_j}.json"
)
with open(filepath, "w+") as state_file:
json.dump(state_payload, state_file)
result = cli_runner.invoke(
cli,
["state", "set", "--force", job_id, "--input-file", filepath],
)
assert_cli_runner(result)
assert state_service.get_state(job_id) == state_payload
def test_merge_from_string(self, state_service, job_ids, cli_runner):
with mock.patch("meltano.cli.state.StateService", return_value=state_service):
job_pairs = []
for idx in range(0, len(job_ids) - 1, 2):
job_pairs.append((job_ids[idx], job_ids[idx + 1]))
for (job_src, job_dst) in job_pairs:
job_src_state = state_service.get_state(job_src)
job_dst_state = state_service.get_state(job_dst)
result = cli_runner.invoke(
cli,
[
"state",
"merge",
job_dst,
json.dumps(job_src_state),
],
)
assert_cli_runner(result)
assert state_service.get_state(job_dst) == merge(
job_src_state, job_dst_state
)
def test_merge_from_file(
self, mkdtemp, state_service, job_ids, payloads, cli_runner
):
tmp_path = mkdtemp()
with mock.patch("meltano.cli.state.StateService", return_value=state_service):
job_pairs = []
for idx in range(0, len(job_ids) - 1, 2):
job_pairs.append((job_ids[idx], job_ids[idx + 1]))
for (job_src, job_dst) in job_pairs:
job_src_state = state_service.get_state(job_src)
job_dst_state = state_service.get_state(job_dst)
filepath = os.path.join(tmp_path, f"{job_src}-{job_dst}")
with open(filepath, "w+") as state_file:
json.dump(job_src_state, state_file)
result = cli_runner.invoke(
cli,
["state", "merge", job_dst, "--input-file", filepath],
)
assert_cli_runner(result)
assert state_service.get_state(job_dst) == merge(
job_src_state, job_dst_state
)
def test_merge_from_job(self, state_service, job_ids, cli_runner):
with mock.patch("meltano.cli.state.StateService", return_value=state_service):
job_pairs = []
for idx in range(0, len(job_ids) - 1, 2):
job_pairs.append((job_ids[idx], job_ids[idx + 1]))
for (job_src, job_dst) in job_pairs:
job_state_src = state_service.get_state(job_src)
job_state_dst = state_service.get_state(job_dst)
merged_state = merge(job_state_src, job_state_dst)
result = cli_runner.invoke(
cli, ["state", "merge", "--from-job-id", job_src, job_dst]
)
assert_cli_runner(result)
assert state_service.get_state(job_dst) == merged_state
def test_get(self, state_service, cli_runner, job_ids_with_expected_states):
with mock.patch("meltano.cli.state.StateService", return_value=state_service):
for (job_id, expected_state) in job_ids_with_expected_states:
result = cli_runner.invoke(cli, ["state", "get", job_id])
assert_cli_runner(result)
assert json.loads(result.stdout) == expected_state
def test_clear(self, state_service, cli_runner, job_ids):
with mock.patch("meltano.cli.state.StateService", return_value=state_service):
for job_id in job_ids:
result = cli_runner.invoke(cli, ["state", "clear", "--force", job_id])
assert_cli_runner(result)
job_state = state_service.get_state(job_id)
assert (not job_state) or (not job_state.get("singer_state")) | en | 0.50572 | # noqa: WPS118 # noqa: WPS118 | 2.122635 | 2 |
Practice/petya.py | ashishjayamohan/competitive-programming | 0 | 6612292 | line1 = input().lower()
line2 = input().lower()
if(line2>line1):
print("-1")
elif(line1>line2):
print("1")
else:
print("0")
| line1 = input().lower()
line2 = input().lower()
if(line2>line1):
print("-1")
elif(line1>line2):
print("1")
else:
print("0")
| none | 1 | 3.690825 | 4 | |
fm_zoo/nfm.py | RoetGer/tf2-fm-zoo | 1 | 6612293 | <gh_stars>1-10
import tensorflow as tf
from fm_zoo.common import LinearModel, EmbedFeatures, FullyConnectedNetwork
class NeuralFactorizationMachine(tf.keras.Model):
"""Implementation of Neural Factorization Machines.
Reference: https://arxiv.org/abs/1708.05027
Contrast to FNN, the fully connected network takes in the pooled pairwise interactions, and the model keeps the
linear part.
"""
def __init__(self, feature_cards, factor_dim, hidden_sizes, dropout_rate=.1, prior=None, name='neural_factorization_machine'):
super(NeuralFactorizationMachine, self).__init__(name=name)
self.embedding = EmbedFeatures(feature_cards, factor_dim, prior=prior, name=name + '/feature_embedding')
self.linear = LinearModel(feature_cards, prior=prior, name=name + '/linear_model')
self.nn = FullyConnectedNetwork(units=hidden_sizes, dropout_rate=dropout_rate, name=name + '/fcn')
def call(self, x, training=False):
linear_out = self.linear(x)
factors = self.embedding(x)
sum_of_squares = tf.reduce_sum(tf.pow(factors, 2), 1)
square_of_sums = tf.pow(tf.reduce_sum(factors, 1), 2)
pooled_interactions = square_of_sums - sum_of_squares
interaction_out = self.nn(pooled_interactions, training=training)
return linear_out + interaction_out
| import tensorflow as tf
from fm_zoo.common import LinearModel, EmbedFeatures, FullyConnectedNetwork
class NeuralFactorizationMachine(tf.keras.Model):
"""Implementation of Neural Factorization Machines.
Reference: https://arxiv.org/abs/1708.05027
Contrast to FNN, the fully connected network takes in the pooled pairwise interactions, and the model keeps the
linear part.
"""
def __init__(self, feature_cards, factor_dim, hidden_sizes, dropout_rate=.1, prior=None, name='neural_factorization_machine'):
super(NeuralFactorizationMachine, self).__init__(name=name)
self.embedding = EmbedFeatures(feature_cards, factor_dim, prior=prior, name=name + '/feature_embedding')
self.linear = LinearModel(feature_cards, prior=prior, name=name + '/linear_model')
self.nn = FullyConnectedNetwork(units=hidden_sizes, dropout_rate=dropout_rate, name=name + '/fcn')
def call(self, x, training=False):
linear_out = self.linear(x)
factors = self.embedding(x)
sum_of_squares = tf.reduce_sum(tf.pow(factors, 2), 1)
square_of_sums = tf.pow(tf.reduce_sum(factors, 1), 2)
pooled_interactions = square_of_sums - sum_of_squares
interaction_out = self.nn(pooled_interactions, training=training)
return linear_out + interaction_out | en | 0.79503 | Implementation of Neural Factorization Machines. Reference: https://arxiv.org/abs/1708.05027 Contrast to FNN, the fully connected network takes in the pooled pairwise interactions, and the model keeps the linear part. | 3.090017 | 3 |
utils/upload_data.py | aevtikheev/elasticpath-shop-bot | 0 | 6612294 | """Ad hoc data upload functions for Elasticpath Shop Bot."""
import json
import os
import tempfile
import httpx
from slugify import slugify
from elasticpath.api import ElasticpathAPI
from elasticpath.models import Product
from settings import settings
def upload_products(products_file_name: str) -> None:
"""Read file with products data and create those products in Elasticpath."""
with open(products_file_name, 'r') as products_file:
products_json = json.load(products_file)
elasticpath_api = ElasticpathAPI(
client_id=settings.elasticpath_client_id,
client_secret=settings.elasticpath_client_secret,
)
for product_data in products_json:
product = elasticpath_api.products.create_product(
name=product_data['name'],
sku=product_data['name'],
slug=slugify(product_data['name']),
manage_stock=False,
description=product_data['description'],
price_amount=product_data['price'],
price_currency='RUB',
price_includes_tax=True,
status='live',
commodity_type='physical',
)
add_picture_for_product(product, product_data['product_image']['url'])
def add_picture_for_product(product: Product, picture_url: str) -> None:
"""Upload picture to Elasticpath and assign it as a main image for a product."""
elasticpath_api = ElasticpathAPI(
client_id=settings.elasticpath_client_id,
client_secret=settings.elasticpath_client_secret,
)
product_picture = httpx.get(picture_url).content
picture_file = tempfile.NamedTemporaryFile(delete=False, suffix='.jpg')
picture_file.write(product_picture)
picture_file.close()
elasticpath_file = elasticpath_api.files.create_file(picture_file.name)
elasticpath_api.products.add_main_image_to_product(elasticpath_file, product)
os.remove(picture_file.name)
def upload_shops(shops_file_name: str, flow_slug: str) -> None:
"""Read file with shops data and upload that data to Elasticpath."""
with open(shops_file_name, 'r') as shops_file:
shops_json = json.load(shops_file)
elasticpath_api = ElasticpathAPI(
client_id=settings.elasticpath_client_id,
client_secret=settings.elasticpath_client_secret,
)
for shop_data in shops_json:
elasticpath_api.flows.create_entry(
flow=flow_slug,
fields={
'Address': shop_data['address']['full'],
'Alias': shop_data['alias'],
'Longitude': shop_data['coordinates']['lon'],
'Latitude': shop_data['coordinates']['lat'],
},
)
| """Ad hoc data upload functions for Elasticpath Shop Bot."""
import json
import os
import tempfile
import httpx
from slugify import slugify
from elasticpath.api import ElasticpathAPI
from elasticpath.models import Product
from settings import settings
def upload_products(products_file_name: str) -> None:
"""Read file with products data and create those products in Elasticpath."""
with open(products_file_name, 'r') as products_file:
products_json = json.load(products_file)
elasticpath_api = ElasticpathAPI(
client_id=settings.elasticpath_client_id,
client_secret=settings.elasticpath_client_secret,
)
for product_data in products_json:
product = elasticpath_api.products.create_product(
name=product_data['name'],
sku=product_data['name'],
slug=slugify(product_data['name']),
manage_stock=False,
description=product_data['description'],
price_amount=product_data['price'],
price_currency='RUB',
price_includes_tax=True,
status='live',
commodity_type='physical',
)
add_picture_for_product(product, product_data['product_image']['url'])
def add_picture_for_product(product: Product, picture_url: str) -> None:
"""Upload picture to Elasticpath and assign it as a main image for a product."""
elasticpath_api = ElasticpathAPI(
client_id=settings.elasticpath_client_id,
client_secret=settings.elasticpath_client_secret,
)
product_picture = httpx.get(picture_url).content
picture_file = tempfile.NamedTemporaryFile(delete=False, suffix='.jpg')
picture_file.write(product_picture)
picture_file.close()
elasticpath_file = elasticpath_api.files.create_file(picture_file.name)
elasticpath_api.products.add_main_image_to_product(elasticpath_file, product)
os.remove(picture_file.name)
def upload_shops(shops_file_name: str, flow_slug: str) -> None:
"""Read file with shops data and upload that data to Elasticpath."""
with open(shops_file_name, 'r') as shops_file:
shops_json = json.load(shops_file)
elasticpath_api = ElasticpathAPI(
client_id=settings.elasticpath_client_id,
client_secret=settings.elasticpath_client_secret,
)
for shop_data in shops_json:
elasticpath_api.flows.create_entry(
flow=flow_slug,
fields={
'Address': shop_data['address']['full'],
'Alias': shop_data['alias'],
'Longitude': shop_data['coordinates']['lon'],
'Latitude': shop_data['coordinates']['lat'],
},
)
| en | 0.948191 | Ad hoc data upload functions for Elasticpath Shop Bot. Read file with products data and create those products in Elasticpath. Upload picture to Elasticpath and assign it as a main image for a product. Read file with shops data and upload that data to Elasticpath. | 2.700736 | 3 |
source/Motor.py | FelixTheoret/Ergocycle | 0 | 6612295 | <filename>source/Motor.py
# Motor class
from __future__ import print_function
import odrive
#import numpy
from odrive.enums import *
import time
import math
# from abc import ABC, abstractmethod
# from sqlalchemy import false
class Motor():
# Constuctor
def __init__(self, nom, kp, ki, T, couple, vitesse, val_max, val_min, duree_ent): #, carte : odrive) :
self._nom = nom
self._carte = []
self._kp = kp
self._ki = ki
self._couple = couple
self._vitesse = vitesse
self._dt = T
self._val_max = val_max
self._val_min = val_min
self._est_concentrique = True
self._est_excentrique = False
self._couple_usager = 0
self._duree = duree_ent
print("moteur construit")
def __del__(self):
print("moteur detruit")
def calibratre_motor(self):
# Find a connected ODrive (this will block until you connect one)
print("finding an odrive...")
my_drive = odrive.find_any()
print("odrive found")
# Calibrate motor and wait for it to finish
print("starting calibration...")
my_drive.axis0.requested_state = AXIS_STATE_FULL_CALIBRATION_SEQUENCE
while my_drive.axis0.current_state != AXIS_STATE_IDLE:
time.sleep(0.1)
return my_drive
def get_force_usager (self):
self._force_usager += 1 #récupération du torque de l'usager
def concentric_mode(self):
self._carte.axis0.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL #pour démarrer le moteur
self._carte.axis0.controller.config.control_mode = CONTROL_MODE_TORQUE_CONTROL #set le mode torque
self._carte.axis0.motor.config.torque_constant = 0.21
self._carte.axis0.controller.input_torque = self._couple #set le couple normalement en Nm.
start = time.time()
end = time.time()
#PI
erreur = self._couple_usager - self._couple
#Modification de la condition du while
#variation_couple_usager = self.couple_cible - self._couple_usager
#seuil_acceptabilite = 0.1*self.couple_cible
compte = 0
#Asservissement
start = time.time()
end = time.time()
while (end - start) <= self._duree :
seconde = end - start
end = time.time()
#while abs(variation_couple_usager) >= seuil_accceptabilite and self._est_concentrique
print("Dans la boucle du temps")
while abs(erreur) >= 5 and self._est_concentrique :
end = time.time()
seconde = end - start
erreur = self._couple_usager - self._couple
P_o = self._kp*erreur
integral = erreur*self._dt
I_o = self._ki * integral
controller = P_o + I_o
if controller > self._val_max :
controller = self._val_max
elif controller < self._val_min :
controller = self._val_min
self._couple += controller
compte += 1
print("torque", compte)
print(self._couple)
#self._carte.axis0.controller.input_torque = self._force
if (end - start) >= self._duree :
break
print("mode concentrique")
def eccentric_mode(self):
#initialisation du projet
self._carte.axis0.requested_state = AXIS_STATE_FULL_CALIBRATION_SEQUENCE
while self._carte.axis0.current_state != AXIS_STATE_IDLE:
time.sleep(0.1)
self._carte.axis0.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL
self._carte.axis0.controller.config.control_mode = CONTROL_MODE_TORQUE_CONTROL
self._carte.axis0.motor.config.torque_constant = 8.23 / 150
self._carte.axis0.controller.input_vel = self._vitesse
self._carte.axis0.controller.input_torque = self._force
#PI
#Asservissement
erreur_excentrique = self._force_usager - self._force
compte = 0
while erreur_excentrique <= -5 and self._est_excentrique :
erreur_excentrique = self._force_usager - self._force
P_o = self._kp*erreur_excentrique
integral = erreur_excentrique*self._dt
I_o = self._ki * integral
controller = P_o + I_o
if controller > self._val_max :
controller = self._val_max
elif controller < self._val_min :
controller = self._val_min
self._force += controller
compte += 1
print("torque", compte)
print(self._force)
#self._carte.axis0.controller.input_torque = self._force
print("mode excentrique")
def passif_mode(self) :
start = time.time()
end = time.time()
while (end - start) <= 30 :
end = time.time()
new_time = time.time()
new_end_time = time.time()
self._carte.axis0.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL #pour démarrer le moteur
self._carte.axis0.controller.config.control_mode = CONTROL_MODE_TORQUE_CONTROL #set le mode torque
self._carte.axis0.motor.config.torque_constant = 0.21
self._carte.axis0.controller.input_torque = self._couple #set le couple normalement en Nm. scale en 0 et 1
while (new_end_time - new_time) <= 30 :
new_end_time = time.time()
vitesse = self._carte.axis0.encoder.vel_estimate
courant = self._carte.axis0.motor.current_control.Iq_setpoint
puissance = self._couple * vitesse
print("puissance en watt :", puissance)
self._carte.axis0.controller.input_torque = 0.0
#test
# moteur = Motor('tsdz2', 0.1 , 0.5, 0.1, 3 , 50, 35, -35, 30)
# type = moteur._nom
# print ("force usager debut: " , moteur._couple_usager)
# #print("le moteur est de type", type)
# moteur._couple_usager = 25
# #print ("force usager apres: ", moteur._force_usager)
# #print ("test")
# # erreur_test = moteur._force - moteur._force_usager
# # print(erreur_test)
# moteur.concentric_mode()
#a = moteur._force_usager
#moteur.get_force_usager()
#b = moteur._force_usager
#print(a)
#print(b)
#moteur.concentric_mode()
| <filename>source/Motor.py
# Motor class
from __future__ import print_function
import odrive
#import numpy
from odrive.enums import *
import time
import math
# from abc import ABC, abstractmethod
# from sqlalchemy import false
class Motor():
# Constuctor
def __init__(self, nom, kp, ki, T, couple, vitesse, val_max, val_min, duree_ent): #, carte : odrive) :
self._nom = nom
self._carte = []
self._kp = kp
self._ki = ki
self._couple = couple
self._vitesse = vitesse
self._dt = T
self._val_max = val_max
self._val_min = val_min
self._est_concentrique = True
self._est_excentrique = False
self._couple_usager = 0
self._duree = duree_ent
print("moteur construit")
def __del__(self):
print("moteur detruit")
def calibratre_motor(self):
# Find a connected ODrive (this will block until you connect one)
print("finding an odrive...")
my_drive = odrive.find_any()
print("odrive found")
# Calibrate motor and wait for it to finish
print("starting calibration...")
my_drive.axis0.requested_state = AXIS_STATE_FULL_CALIBRATION_SEQUENCE
while my_drive.axis0.current_state != AXIS_STATE_IDLE:
time.sleep(0.1)
return my_drive
def get_force_usager (self):
self._force_usager += 1 #récupération du torque de l'usager
def concentric_mode(self):
self._carte.axis0.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL #pour démarrer le moteur
self._carte.axis0.controller.config.control_mode = CONTROL_MODE_TORQUE_CONTROL #set le mode torque
self._carte.axis0.motor.config.torque_constant = 0.21
self._carte.axis0.controller.input_torque = self._couple #set le couple normalement en Nm.
start = time.time()
end = time.time()
#PI
erreur = self._couple_usager - self._couple
#Modification de la condition du while
#variation_couple_usager = self.couple_cible - self._couple_usager
#seuil_acceptabilite = 0.1*self.couple_cible
compte = 0
#Asservissement
start = time.time()
end = time.time()
while (end - start) <= self._duree :
seconde = end - start
end = time.time()
#while abs(variation_couple_usager) >= seuil_accceptabilite and self._est_concentrique
print("Dans la boucle du temps")
while abs(erreur) >= 5 and self._est_concentrique :
end = time.time()
seconde = end - start
erreur = self._couple_usager - self._couple
P_o = self._kp*erreur
integral = erreur*self._dt
I_o = self._ki * integral
controller = P_o + I_o
if controller > self._val_max :
controller = self._val_max
elif controller < self._val_min :
controller = self._val_min
self._couple += controller
compte += 1
print("torque", compte)
print(self._couple)
#self._carte.axis0.controller.input_torque = self._force
if (end - start) >= self._duree :
break
print("mode concentrique")
def eccentric_mode(self):
#initialisation du projet
self._carte.axis0.requested_state = AXIS_STATE_FULL_CALIBRATION_SEQUENCE
while self._carte.axis0.current_state != AXIS_STATE_IDLE:
time.sleep(0.1)
self._carte.axis0.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL
self._carte.axis0.controller.config.control_mode = CONTROL_MODE_TORQUE_CONTROL
self._carte.axis0.motor.config.torque_constant = 8.23 / 150
self._carte.axis0.controller.input_vel = self._vitesse
self._carte.axis0.controller.input_torque = self._force
#PI
#Asservissement
erreur_excentrique = self._force_usager - self._force
compte = 0
while erreur_excentrique <= -5 and self._est_excentrique :
erreur_excentrique = self._force_usager - self._force
P_o = self._kp*erreur_excentrique
integral = erreur_excentrique*self._dt
I_o = self._ki * integral
controller = P_o + I_o
if controller > self._val_max :
controller = self._val_max
elif controller < self._val_min :
controller = self._val_min
self._force += controller
compte += 1
print("torque", compte)
print(self._force)
#self._carte.axis0.controller.input_torque = self._force
print("mode excentrique")
def passif_mode(self) :
start = time.time()
end = time.time()
while (end - start) <= 30 :
end = time.time()
new_time = time.time()
new_end_time = time.time()
self._carte.axis0.requested_state = AXIS_STATE_CLOSED_LOOP_CONTROL #pour démarrer le moteur
self._carte.axis0.controller.config.control_mode = CONTROL_MODE_TORQUE_CONTROL #set le mode torque
self._carte.axis0.motor.config.torque_constant = 0.21
self._carte.axis0.controller.input_torque = self._couple #set le couple normalement en Nm. scale en 0 et 1
while (new_end_time - new_time) <= 30 :
new_end_time = time.time()
vitesse = self._carte.axis0.encoder.vel_estimate
courant = self._carte.axis0.motor.current_control.Iq_setpoint
puissance = self._couple * vitesse
print("puissance en watt :", puissance)
self._carte.axis0.controller.input_torque = 0.0
#test
# moteur = Motor('tsdz2', 0.1 , 0.5, 0.1, 3 , 50, 35, -35, 30)
# type = moteur._nom
# print ("force usager debut: " , moteur._couple_usager)
# #print("le moteur est de type", type)
# moteur._couple_usager = 25
# #print ("force usager apres: ", moteur._force_usager)
# #print ("test")
# # erreur_test = moteur._force - moteur._force_usager
# # print(erreur_test)
# moteur.concentric_mode()
#a = moteur._force_usager
#moteur.get_force_usager()
#b = moteur._force_usager
#print(a)
#print(b)
#moteur.concentric_mode()
| fr | 0.365488 | # Motor class #import numpy # from abc import ABC, abstractmethod # from sqlalchemy import false # Constuctor #, carte : odrive) : # Find a connected ODrive (this will block until you connect one) # Calibrate motor and wait for it to finish #récupération du torque de l'usager #pour démarrer le moteur #set le mode torque #set le couple normalement en Nm. #PI #Modification de la condition du while #variation_couple_usager = self.couple_cible - self._couple_usager #seuil_acceptabilite = 0.1*self.couple_cible #Asservissement #while abs(variation_couple_usager) >= seuil_accceptabilite and self._est_concentrique #self._carte.axis0.controller.input_torque = self._force #initialisation du projet #PI #Asservissement #self._carte.axis0.controller.input_torque = self._force #pour démarrer le moteur #set le mode torque #set le couple normalement en Nm. scale en 0 et 1 #test # moteur = Motor('tsdz2', 0.1 , 0.5, 0.1, 3 , 50, 35, -35, 30) # type = moteur._nom # print ("force usager debut: " , moteur._couple_usager) # #print("le moteur est de type", type) # moteur._couple_usager = 25 # #print ("force usager apres: ", moteur._force_usager) # #print ("test") # # erreur_test = moteur._force - moteur._force_usager # # print(erreur_test) # moteur.concentric_mode() #a = moteur._force_usager #moteur.get_force_usager() #b = moteur._force_usager #print(a) #print(b) #moteur.concentric_mode() | 2.704351 | 3 |
setup.py | CipherChen/sesame | 8 | 6612296 | import os
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="sesame",
version="1.0",
author="<NAME>",
author_email="<EMAIL>",
description="Passwords in my own.",
url="https://github.com/CipherChen/sesame",
packages=setuptools.find_packages(),
scripts=["bin/sesame"],
install_requires=[
"pycrypto>=2.6.1",
],
data_files=[
("%s/.sesame" % os.environ["HOME"], ["default/config.yaml"]),
],
)
| import os
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="sesame",
version="1.0",
author="<NAME>",
author_email="<EMAIL>",
description="Passwords in my own.",
url="https://github.com/CipherChen/sesame",
packages=setuptools.find_packages(),
scripts=["bin/sesame"],
install_requires=[
"pycrypto>=2.6.1",
],
data_files=[
("%s/.sesame" % os.environ["HOME"], ["default/config.yaml"]),
],
)
| none | 1 | 1.478148 | 1 | |
day04/run_p1.py | manu8989/AoC19 | 1 | 6612297 | #!/usr/bin/env python
import os
import sys
import math
def test_two_adjacent_numbers_exist_example1():
assert two_adjacent_numbers_exist(123789) == False
def test_two_adjacent_numbers_exist_example2():
assert two_adjacent_numbers_exist(111111) == True
def test_two_adjacent_numbers_exist_example3():
assert two_adjacent_numbers_exist(135529) == True
def test_do_numbers_increase_example1():
assert do_numbers_increase(123789) == True
def test_do_numbers_increase_example2():
assert do_numbers_increase(111111) == True
def test_do_numbers_increase_example3():
assert do_numbers_increase(135529) == False
def two_adjacent_numbers_exist(number):
return any([digits in str(number) for digits in ["00","11","22","33","44","55","66","77","88","99"]])
def do_numbers_increase(number):
for i in range(len(str(number))-1):
if(int(str(number)[i]) > int(str(number)[i+1])):
return False
return True
if __name__ == "__main__":
lower_boundary = 372037
upper_boundary = 905157
cnt = 0
for number in range(lower_boundary, upper_boundary+1):
if two_adjacent_numbers_exist(number) and do_numbers_increase(number):
cnt += 1
print(cnt)
| #!/usr/bin/env python
import os
import sys
import math
def test_two_adjacent_numbers_exist_example1():
assert two_adjacent_numbers_exist(123789) == False
def test_two_adjacent_numbers_exist_example2():
assert two_adjacent_numbers_exist(111111) == True
def test_two_adjacent_numbers_exist_example3():
assert two_adjacent_numbers_exist(135529) == True
def test_do_numbers_increase_example1():
assert do_numbers_increase(123789) == True
def test_do_numbers_increase_example2():
assert do_numbers_increase(111111) == True
def test_do_numbers_increase_example3():
assert do_numbers_increase(135529) == False
def two_adjacent_numbers_exist(number):
return any([digits in str(number) for digits in ["00","11","22","33","44","55","66","77","88","99"]])
def do_numbers_increase(number):
for i in range(len(str(number))-1):
if(int(str(number)[i]) > int(str(number)[i+1])):
return False
return True
if __name__ == "__main__":
lower_boundary = 372037
upper_boundary = 905157
cnt = 0
for number in range(lower_boundary, upper_boundary+1):
if two_adjacent_numbers_exist(number) and do_numbers_increase(number):
cnt += 1
print(cnt)
| ru | 0.26433 | #!/usr/bin/env python | 4.029724 | 4 |
id_identify.py | zhz03/Opti-track_data_streaming | 0 | 6612298 | <gh_stars>0
import Optitrack as OptiT
import numpy as np
import time
import sys
def id_parse():
id1 = 0
id2 = 0
id3 = 0
current_time = time.time()
begin_time = time.time()
count = 0
while count ==0:
pass
if time.time() - current_time > 0.01:
ID = op.id
if id1 == 0:
a = ID
id1 = a
elif id1 != 0 and id2 == 0:
b = ID
if b != id1:
id2 = b
elif id1 != 0 and id2 != 0:
c = ID
if c != id1 and c != id2:
id3 = c
current_time = time.time()
if time.time() - begin_time >= 5:
count +=1
print("id parsing finish")
"""
print(f"id1:",id1)
print(f"id2:",id2)
print(f"id3:",id3)
"""
return id1,id2,id3
if __name__ == '__main__':
op = OptiT.OptiTrack()
current_time = time.time()
position1 = []
position2 = []
position3 = []
time1 = []
time2 = []
time3 = []
id1 = 0
id2 = 0
id3 = 0
id1,id2,id3 = id_parse()
print(f"id1:",id1)
print(f"id2:",id2)
print(f"id3:",id3)
"""
current_time = time.time()
begin_time = time.time()
count = 0
while count ==0:
pass
if time.time() - current_time > 0.01:
ID = op.id
if id1 == 0:
a = ID
id1 = a
elif id1 != 0 and id2 == 0:
b = ID
if b != id1:
id2 = b
elif id1 != 0 and id2 != 0:
c = ID
if c != id1 and c != id2:
id3 = c
current_time = time.time()
if time.time() - begin_time >= 5:
count +=1
print("id parsing finish")
print(f"id1:",id1)
print(f"id2:",id2)
print(f"id3:",id3)
"""
| import Optitrack as OptiT
import numpy as np
import time
import sys
def id_parse():
id1 = 0
id2 = 0
id3 = 0
current_time = time.time()
begin_time = time.time()
count = 0
while count ==0:
pass
if time.time() - current_time > 0.01:
ID = op.id
if id1 == 0:
a = ID
id1 = a
elif id1 != 0 and id2 == 0:
b = ID
if b != id1:
id2 = b
elif id1 != 0 and id2 != 0:
c = ID
if c != id1 and c != id2:
id3 = c
current_time = time.time()
if time.time() - begin_time >= 5:
count +=1
print("id parsing finish")
"""
print(f"id1:",id1)
print(f"id2:",id2)
print(f"id3:",id3)
"""
return id1,id2,id3
if __name__ == '__main__':
op = OptiT.OptiTrack()
current_time = time.time()
position1 = []
position2 = []
position3 = []
time1 = []
time2 = []
time3 = []
id1 = 0
id2 = 0
id3 = 0
id1,id2,id3 = id_parse()
print(f"id1:",id1)
print(f"id2:",id2)
print(f"id3:",id3)
"""
current_time = time.time()
begin_time = time.time()
count = 0
while count ==0:
pass
if time.time() - current_time > 0.01:
ID = op.id
if id1 == 0:
a = ID
id1 = a
elif id1 != 0 and id2 == 0:
b = ID
if b != id1:
id2 = b
elif id1 != 0 and id2 != 0:
c = ID
if c != id1 and c != id2:
id3 = c
current_time = time.time()
if time.time() - begin_time >= 5:
count +=1
print("id parsing finish")
print(f"id1:",id1)
print(f"id2:",id2)
print(f"id3:",id3)
""" | en | 0.453773 | print(f"id1:",id1) print(f"id2:",id2) print(f"id3:",id3) current_time = time.time() begin_time = time.time() count = 0 while count ==0: pass if time.time() - current_time > 0.01: ID = op.id if id1 == 0: a = ID id1 = a elif id1 != 0 and id2 == 0: b = ID if b != id1: id2 = b elif id1 != 0 and id2 != 0: c = ID if c != id1 and c != id2: id3 = c current_time = time.time() if time.time() - begin_time >= 5: count +=1 print("id parsing finish") print(f"id1:",id1) print(f"id2:",id2) print(f"id3:",id3) | 3.069432 | 3 |
app/improving_agent/src/workflows/__init__.py | brettasmi/EvidARA | 0 | 6612299 | WORKFLOW_LOOKUP = 'lookup'
SUPPORTED_WORKFLOWS = [
WORKFLOW_LOOKUP
]
| WORKFLOW_LOOKUP = 'lookup'
SUPPORTED_WORKFLOWS = [
WORKFLOW_LOOKUP
]
| none | 1 | 1.117177 | 1 | |
source/python3/directory_checker.py | spade-as-in-ace/SignatureBasedIDS | 1 | 6612300 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from datetime import datetime
import os
import sys
import time
import file_checker
DELAY = 1800 # check every 30 minutes
def check_directory(directory_path, last_check):
if directory_path[-1] != "/":
directory_path += "/"
directory = os.listdir(directory_path)
subdirectories = []
for f in directory:
path = f"{directory_path}{f}"
if os.path.isfile(path):
if last_check < datetime.fromtimestamp(os.path.getmtime(path)):
print(path)
file_checker.check_file(path)
else:
subdirectories.append(path)
for sub_dir in subdirectories:
check_directory(sub_dir, last_check)
def main(directory_path):
last_check = datetime.min
while True:
check_directory(directory_path, last_check)
last_check = datetime.now()
time.sleep(DELAY) # check directory every 30 min
if __name__ == "__main__":
try:
main(sys.argv[1])
except Exception as e:
print(e)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from datetime import datetime
import os
import sys
import time
import file_checker
DELAY = 1800 # check every 30 minutes
def check_directory(directory_path, last_check):
if directory_path[-1] != "/":
directory_path += "/"
directory = os.listdir(directory_path)
subdirectories = []
for f in directory:
path = f"{directory_path}{f}"
if os.path.isfile(path):
if last_check < datetime.fromtimestamp(os.path.getmtime(path)):
print(path)
file_checker.check_file(path)
else:
subdirectories.append(path)
for sub_dir in subdirectories:
check_directory(sub_dir, last_check)
def main(directory_path):
last_check = datetime.min
while True:
check_directory(directory_path, last_check)
last_check = datetime.now()
time.sleep(DELAY) # check directory every 30 min
if __name__ == "__main__":
try:
main(sys.argv[1])
except Exception as e:
print(e) | en | 0.506108 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # check every 30 minutes # check directory every 30 min | 3.012691 | 3 |
Module 4/task4_3.py | bondss/python_scripts | 0 | 6612301 | # TASK:
# Написать программу декодирования телефонного номера для АОН.
# По запросу АОНа АТС посылает телефонный номер, используя следующие правила:
# — Если цифра повторяется менее 2 раз, то это помеха и она должна быть отброшена
# — Каждая значащая цифра повторяется минимум 2 раза
# — Если в номере идут несколько цифр подряд,
# то для обозначения «такая же цифра как предыдущая» используется идущий 2 и более подряд раз знак #
# SOLUTION:
from functools import reduce
def aon(s):
# Remove single digits
s = ''.join(map(lambda x: x[1] if x[0] == x[1] else '', zip(s[1:], s)))
# Remove duplicates
s = reduce(lambda x, y: x + y if x[-1] != y else x, s)
# Decode '#'
s = reduce(lambda x, y: x + x[-1] if y == '#' else x + y, s)
# Remove leading '#'
s = s.strip('#')
return s
# Test suite
print(aon("###1111233343322#221#235555###1"))
print(aon("4434###552222311333661"))
| # TASK:
# Написать программу декодирования телефонного номера для АОН.
# По запросу АОНа АТС посылает телефонный номер, используя следующие правила:
# — Если цифра повторяется менее 2 раз, то это помеха и она должна быть отброшена
# — Каждая значащая цифра повторяется минимум 2 раза
# — Если в номере идут несколько цифр подряд,
# то для обозначения «такая же цифра как предыдущая» используется идущий 2 и более подряд раз знак #
# SOLUTION:
from functools import reduce
def aon(s):
# Remove single digits
s = ''.join(map(lambda x: x[1] if x[0] == x[1] else '', zip(s[1:], s)))
# Remove duplicates
s = reduce(lambda x, y: x + y if x[-1] != y else x, s)
# Decode '#'
s = reduce(lambda x, y: x + x[-1] if y == '#' else x + y, s)
# Remove leading '#'
s = s.strip('#')
return s
# Test suite
print(aon("###1111233343322#221#235555###1"))
print(aon("4434###552222311333661"))
| ru | 0.989874 | # TASK: # Написать программу декодирования телефонного номера для АОН. # По запросу АОНа АТС посылает телефонный номер, используя следующие правила: # — Если цифра повторяется менее 2 раз, то это помеха и она должна быть отброшена # — Каждая значащая цифра повторяется минимум 2 раза # — Если в номере идут несколько цифр подряд, # то для обозначения «такая же цифра как предыдущая» используется идущий 2 и более подряд раз знак # # SOLUTION: # Remove single digits # Remove duplicates # Decode '#' # Remove leading '#' # Test suite ##1111233343322#221#235555###1")) ###552222311333661")) | 3.45399 | 3 |
app/profile/views.py | green-mercury/SampleManagerWeb | 4 | 6612302 | <reponame>green-mercury/SampleManagerWeb<gh_stars>1-10
from flask import render_template, redirect, request, flash
from . import profile
from .. import db
from flask_login import login_required, current_user
from .forms import ChangePasswordForm, ChangeDetailsForm
from ..models import User
@profile.route('/changedetails', methods=['GET', 'POST'])
@login_required
def changedetails():
form = ChangeDetailsForm()
if form.is_submitted():
if form.validate():
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('Details updated.')
else:
form.username.data = current_user.username
form.email.data = current_user.email
return render_template('profile/changedetails.html', form=form)
@profile.route('/changepassword', methods=['GET', 'POST'])
@login_required
def changepassword():
form = ChangePasswordForm()
if form.validate_on_submit():
user = current_user
if user.verify_password(form.oldpassword.data):
user.password = form.password.data
db.session.commit()
return redirect('/')
else:
flash('Password incorrect.')
return render_template('profile/changepassword.html', form=form)
@profile.route('/leave', methods=['GET'])
@login_required
def leave():
user = None
heirname = request.args.get("heir")
if heirname is not None:
# try to get corresponding user from database
user = User.query.filter_by(username=heirname).first()
if user is None or user.heir is not None or user == current_user:
flash("Please name a valid user that is still part of the laboratory.")
return render_template('profile/leave.html', user=None)
confirm = request.args.get("confirm")
reactivate = request.args.get("reactivate")
if reactivate == "1":
current_user.heir = None
db.session.commit()
if confirm == "1" and user is not None:
current_user.heir = user
inheritance = []
# need to create a list of inheritance first, because apparently database changes mess up the for-loop
for u in current_user.inheritance:
inheritance.append(u)
for u in inheritance:
u.heir = user
db.session.commit()
return render_template('profile/leave.html', user=user)
| from flask import render_template, redirect, request, flash
from . import profile
from .. import db
from flask_login import login_required, current_user
from .forms import ChangePasswordForm, ChangeDetailsForm
from ..models import User
@profile.route('/changedetails', methods=['GET', 'POST'])
@login_required
def changedetails():
form = ChangeDetailsForm()
if form.is_submitted():
if form.validate():
current_user.username = form.username.data
current_user.email = form.email.data
db.session.commit()
flash('Details updated.')
else:
form.username.data = current_user.username
form.email.data = current_user.email
return render_template('profile/changedetails.html', form=form)
@profile.route('/changepassword', methods=['GET', 'POST'])
@login_required
def changepassword():
form = ChangePasswordForm()
if form.validate_on_submit():
user = current_user
if user.verify_password(form.oldpassword.data):
user.password = form.password.data
db.session.commit()
return redirect('/')
else:
flash('Password incorrect.')
return render_template('profile/changepassword.html', form=form)
@profile.route('/leave', methods=['GET'])
@login_required
def leave():
user = None
heirname = request.args.get("heir")
if heirname is not None:
# try to get corresponding user from database
user = User.query.filter_by(username=heirname).first()
if user is None or user.heir is not None or user == current_user:
flash("Please name a valid user that is still part of the laboratory.")
return render_template('profile/leave.html', user=None)
confirm = request.args.get("confirm")
reactivate = request.args.get("reactivate")
if reactivate == "1":
current_user.heir = None
db.session.commit()
if confirm == "1" and user is not None:
current_user.heir = user
inheritance = []
# need to create a list of inheritance first, because apparently database changes mess up the for-loop
for u in current_user.inheritance:
inheritance.append(u)
for u in inheritance:
u.heir = user
db.session.commit()
return render_template('profile/leave.html', user=user) | en | 0.841042 | # try to get corresponding user from database # need to create a list of inheritance first, because apparently database changes mess up the for-loop | 2.640225 | 3 |
julynter/config.py | dew-uff/julynter | 9 | 6612303 | <gh_stars>1-10
"""Julynter config utility"""
# pylint: disable=import-outside-toplevel
import json
DEFAULT_EXPERIMENT_SERVER = "https://julynter.npimentel.net"
CONFIG_DIR = '.julynter'
def home_config_path():
"""Return home path"""
from .util import Path
return Path.home() / CONFIG_DIR
def load_home_config():
"""Return home confing"""
data = load_config(home_config_path())
add_experiment(data)
return data
def load_project_config(merge_home=True):
"""Return project config"""
from .util import Path
project_config = load_config(Path.cwd() / CONFIG_DIR)
if merge_home:
home_config = load_home_config()
project_config = merge(home_config, project_config)
return project_config
def save_home_config(data):
"""Save home path"""
return save_config(home_config_path(), data)
def save_project_config(data):
"""Save project path"""
from .util import Path
return save_config(Path.cwd() / CONFIG_DIR, data)
def load_config(base):
"""Load julynter config file"""
data = {}
try:
if base.is_dir() and (base / 'config.json').is_file():
with open(str(base / 'config.json'), 'r') as fil:
data = json.load(fil)
elif base.with_suffix('.rc').is_file():
with open(str(base.with_suffix('.rc')), 'r') as fil:
data = json.load(fil)
except json.JSONDecodeError as exc:
print("Julynter Config ({}) decode error:".format(base), exc)
return data
def save_config(base, data):
"""Save julynter config file"""
try:
if base.with_suffix('.rc').is_file():
with open(str(base.with_suffix('.rc')), 'w') as fil:
json.dump(data, fil)
else:
base.mkdir(parents=True, exist_ok=True)
with open(str(base / 'config.json'), 'w') as fil:
json.dump(data, fil)
return True
except json.JSONDecodeError as exc:
print("Julynter Config ({}) encode error:".format(base), exc)
return False
def add_experiment(data):
"""Add experiment config to data"""
if "experiment" not in data:
data["experiment"] = {}
exp = data['experiment']
exp['id'] = exp.get('id', '<unset>')
exp['lintingMessage'] = exp.get('lintingMessage', False)
exp['lintingTypes'] = exp.get('lintingTypes', False)
exp['activity'] = exp.get('activity', False)
exp['execution'] = exp.get('execution', False)
exp['code'] = exp.get('code', False)
exp['name'] = exp.get('name', False)
exp['enabled'] = exp.get('enabled', False)
exp['sendServer'] = exp.get('sendServer', False)
exp["server"] = exp.get("server", DEFAULT_EXPERIMENT_SERVER)
def merge(old, new):
"""Merge dicts"""
for key, value in new.items():
if key in old and isinstance(old[key], dict):
old[key] = merge(old[key], value)
else:
old[key] = value
return old
| """Julynter config utility"""
# pylint: disable=import-outside-toplevel
import json
DEFAULT_EXPERIMENT_SERVER = "https://julynter.npimentel.net"
CONFIG_DIR = '.julynter'
def home_config_path():
"""Return home path"""
from .util import Path
return Path.home() / CONFIG_DIR
def load_home_config():
"""Return home confing"""
data = load_config(home_config_path())
add_experiment(data)
return data
def load_project_config(merge_home=True):
"""Return project config"""
from .util import Path
project_config = load_config(Path.cwd() / CONFIG_DIR)
if merge_home:
home_config = load_home_config()
project_config = merge(home_config, project_config)
return project_config
def save_home_config(data):
"""Save home path"""
return save_config(home_config_path(), data)
def save_project_config(data):
"""Save project path"""
from .util import Path
return save_config(Path.cwd() / CONFIG_DIR, data)
def load_config(base):
"""Load julynter config file"""
data = {}
try:
if base.is_dir() and (base / 'config.json').is_file():
with open(str(base / 'config.json'), 'r') as fil:
data = json.load(fil)
elif base.with_suffix('.rc').is_file():
with open(str(base.with_suffix('.rc')), 'r') as fil:
data = json.load(fil)
except json.JSONDecodeError as exc:
print("Julynter Config ({}) decode error:".format(base), exc)
return data
def save_config(base, data):
"""Save julynter config file"""
try:
if base.with_suffix('.rc').is_file():
with open(str(base.with_suffix('.rc')), 'w') as fil:
json.dump(data, fil)
else:
base.mkdir(parents=True, exist_ok=True)
with open(str(base / 'config.json'), 'w') as fil:
json.dump(data, fil)
return True
except json.JSONDecodeError as exc:
print("Julynter Config ({}) encode error:".format(base), exc)
return False
def add_experiment(data):
"""Add experiment config to data"""
if "experiment" not in data:
data["experiment"] = {}
exp = data['experiment']
exp['id'] = exp.get('id', '<unset>')
exp['lintingMessage'] = exp.get('lintingMessage', False)
exp['lintingTypes'] = exp.get('lintingTypes', False)
exp['activity'] = exp.get('activity', False)
exp['execution'] = exp.get('execution', False)
exp['code'] = exp.get('code', False)
exp['name'] = exp.get('name', False)
exp['enabled'] = exp.get('enabled', False)
exp['sendServer'] = exp.get('sendServer', False)
exp["server"] = exp.get("server", DEFAULT_EXPERIMENT_SERVER)
def merge(old, new):
"""Merge dicts"""
for key, value in new.items():
if key in old and isinstance(old[key], dict):
old[key] = merge(old[key], value)
else:
old[key] = value
return old | en | 0.556818 | Julynter config utility # pylint: disable=import-outside-toplevel Return home path Return home confing Return project config Save home path Save project path Load julynter config file Save julynter config file Add experiment config to data Merge dicts | 2.513728 | 3 |
layers/transformer/direction_sensitive_geometric.py | RobertCsordas/tcf | 5 | 6612304 | import torch
from .multi_head_attention import AttentionMask, MultiHeadAttentionBase, AttentionMergeMixin
from typing import Optional
from .geometric_attention import geometric_attention_activation
import math
from .multi_head_relative_pos_attention import FixedRelativeMultiheadAttentionBase, shift
class DirectionSensitiveGeometricAttention(AttentionMergeMixin, FixedRelativeMultiheadAttentionBase):
def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, global_pos_bias: bool = True,
global_content_bias: bool = True, input_size: Optional[int] = None,
output_size: Optional[int] = None, normalize_score: bool = True):
super(AttentionMergeMixin, self).__init__(state_size, n_heads, dropout, input_size)
self.data_to_kv = torch.nn.Linear(state_size, 2 * n_heads * self.projection_size, bias=False)
self.data_to_q = torch.nn.Linear(self.input_size, n_heads * self.projection_size, bias=False)
self.data_to_qp = torch.nn.Linear(self.input_size, n_heads * 2)
self.global_content_bias = torch.nn.Parameter(torch.zeros([n_heads, self.projection_size])) \
if global_content_bias else None
self.s_bias = torch.nn.Parameter(torch.full([1], 0.0))
self.scale = torch.nn.Parameter(torch.full([1], 1.0 / math.sqrt(self.projection_size)))
self.scale_pos = torch.nn.Parameter(torch.full([1], 1.0))
self.normalize_score = normalize_score
self.input_size = state_size if input_size is None else input_size
print(f"DirectionSensitiveGeometricAttention: normalize score: {normalize_score}")
super(DirectionSensitiveGeometricAttention, self).__init__(output_size)
self.reset_parameters()
def get_attention_scores(self, mask: Optional[torch.Tensor],
q_content: torch.Tensor, k_content: torch.Tensor,
q_pos: torch.Tensor,
pos_offset: int) -> torch.Tensor:
# content-content addressing
logits = torch.bmm(q_content, self.dropout(k_content).transpose(1, 2))
# directionality. Do scaling here, less flops.
prefer_back, prefer_front = (q_pos * self.scale_pos).unsqueeze(-2).expand(-1,-1,logits.shape[-1],-1).unbind(-1)
fpos = prefer_front.triu(1 + pos_offset) + prefer_back.tril(-1 + pos_offset)
logits = logits * self.scale + fpos + self.s_bias
logits = self.apply_logit_masks(logits.view(logits.shape[0] // self.n_heads, self.n_heads, *logits.shape[1:]), mask).flatten(0,1)
logits.masked_fill_(torch.eye(logits.shape[-1], device=logits.device, dtype=torch.bool)[pos_offset : pos_offset + logits.shape[-2]], float("-inf"))
return geometric_attention_activation(logits, mask, pos_offset, normalize=self.normalize_score)
def add_head_specific_bias(self, data: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor:
# data [batch * n_heads, len, c]
# bias [n_heads, c]
return (data.view(-1, bias.shape[0], *data.shape[1:]) + bias.unsqueeze(1).type_as(data)).view_as(data) \
if bias is not None else data
def _attention(self, mask: Optional[torch.Tensor],
q_content: torch.Tensor, k_content: torch.Tensor,
q_pos: torch.Tensor,
v: torch.Tensor, pos_offset: int) -> [torch.Tensor, torch.Tensor]:
scores = self.get_attention_scores(mask, q_content, k_content, q_pos, pos_offset)
# Scores shape: [n_batch * n_heads, n_out, n_in]
return self._attention_read(mask, scores, v)
def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],
pos_offset: int = 0, need_weights: bool = False):
# curr_state: [batch_size, out_len, c]
# attend_to: [batch_size, in_len, c]
batch_size, in_len = attend_to.shape[0:2]
out_len = curr_state.shape[1]
k_content, v = self.transform_data(attend_to, self.data_to_kv, 2)
q, = self.transform_data(curr_state, self.data_to_q, 1)
q_pos, = self.transform_data(curr_state, self.data_to_qp, 1)
q_content = self.add_head_specific_bias(q, self.global_content_bias)
data, scores = self.merged_attention(batch_size, out_len, mask, q_content, k_content, q_pos, v,
pos_offset, need_weights=need_weights)
if need_weights:
return data, scores
else:
return data
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.data_to_q.weight)
torch.nn.init.xavier_uniform_(self.pos_to_pq.weight)
torch.nn.init.xavier_uniform_(self.data_to_kv.weight[:self.projection_size * self.n_heads])
torch.nn.init.xavier_uniform_(self.data_to_kv.weight[self.projection_size * self.n_heads:])
if self.global_content_bias is not None:
self.global_content_bias.data.fill_(0)
class DirectionSensitiveGeometricAttentionMyInit(DirectionSensitiveGeometricAttention):
def xavier_manual_(self, tensor: torch.Tensor, fan_in: int, fan_out: int, gain: float = 1) -> torch.Tensor:
std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
return torch.nn.init._no_grad_uniform_(tensor, -a, a)
def reset_parameters(self):
self.xavier_manual_(self.data_to_q.weight, self.state_size, self.projection_size)
self.xavier_manual_(self.pos_to_pq.weight, self.state_size, 2)
self.xavier_manual_(self.data_to_kv.weight, self.state_size, self.projection_size)
self.xavier_manual_(self.multi_head_merge.weight, self.projection_size, self.state_size)
if self.global_content_bias is not None:
self.global_content_bias.data.fill_(0)
| import torch
from .multi_head_attention import AttentionMask, MultiHeadAttentionBase, AttentionMergeMixin
from typing import Optional
from .geometric_attention import geometric_attention_activation
import math
from .multi_head_relative_pos_attention import FixedRelativeMultiheadAttentionBase, shift
class DirectionSensitiveGeometricAttention(AttentionMergeMixin, FixedRelativeMultiheadAttentionBase):
def __init__(self, state_size: int, n_heads: int, dropout: float = 0.0, global_pos_bias: bool = True,
global_content_bias: bool = True, input_size: Optional[int] = None,
output_size: Optional[int] = None, normalize_score: bool = True):
super(AttentionMergeMixin, self).__init__(state_size, n_heads, dropout, input_size)
self.data_to_kv = torch.nn.Linear(state_size, 2 * n_heads * self.projection_size, bias=False)
self.data_to_q = torch.nn.Linear(self.input_size, n_heads * self.projection_size, bias=False)
self.data_to_qp = torch.nn.Linear(self.input_size, n_heads * 2)
self.global_content_bias = torch.nn.Parameter(torch.zeros([n_heads, self.projection_size])) \
if global_content_bias else None
self.s_bias = torch.nn.Parameter(torch.full([1], 0.0))
self.scale = torch.nn.Parameter(torch.full([1], 1.0 / math.sqrt(self.projection_size)))
self.scale_pos = torch.nn.Parameter(torch.full([1], 1.0))
self.normalize_score = normalize_score
self.input_size = state_size if input_size is None else input_size
print(f"DirectionSensitiveGeometricAttention: normalize score: {normalize_score}")
super(DirectionSensitiveGeometricAttention, self).__init__(output_size)
self.reset_parameters()
def get_attention_scores(self, mask: Optional[torch.Tensor],
q_content: torch.Tensor, k_content: torch.Tensor,
q_pos: torch.Tensor,
pos_offset: int) -> torch.Tensor:
# content-content addressing
logits = torch.bmm(q_content, self.dropout(k_content).transpose(1, 2))
# directionality. Do scaling here, less flops.
prefer_back, prefer_front = (q_pos * self.scale_pos).unsqueeze(-2).expand(-1,-1,logits.shape[-1],-1).unbind(-1)
fpos = prefer_front.triu(1 + pos_offset) + prefer_back.tril(-1 + pos_offset)
logits = logits * self.scale + fpos + self.s_bias
logits = self.apply_logit_masks(logits.view(logits.shape[0] // self.n_heads, self.n_heads, *logits.shape[1:]), mask).flatten(0,1)
logits.masked_fill_(torch.eye(logits.shape[-1], device=logits.device, dtype=torch.bool)[pos_offset : pos_offset + logits.shape[-2]], float("-inf"))
return geometric_attention_activation(logits, mask, pos_offset, normalize=self.normalize_score)
def add_head_specific_bias(self, data: torch.Tensor, bias: Optional[torch.Tensor]) -> torch.Tensor:
# data [batch * n_heads, len, c]
# bias [n_heads, c]
return (data.view(-1, bias.shape[0], *data.shape[1:]) + bias.unsqueeze(1).type_as(data)).view_as(data) \
if bias is not None else data
def _attention(self, mask: Optional[torch.Tensor],
q_content: torch.Tensor, k_content: torch.Tensor,
q_pos: torch.Tensor,
v: torch.Tensor, pos_offset: int) -> [torch.Tensor, torch.Tensor]:
scores = self.get_attention_scores(mask, q_content, k_content, q_pos, pos_offset)
# Scores shape: [n_batch * n_heads, n_out, n_in]
return self._attention_read(mask, scores, v)
def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],
pos_offset: int = 0, need_weights: bool = False):
# curr_state: [batch_size, out_len, c]
# attend_to: [batch_size, in_len, c]
batch_size, in_len = attend_to.shape[0:2]
out_len = curr_state.shape[1]
k_content, v = self.transform_data(attend_to, self.data_to_kv, 2)
q, = self.transform_data(curr_state, self.data_to_q, 1)
q_pos, = self.transform_data(curr_state, self.data_to_qp, 1)
q_content = self.add_head_specific_bias(q, self.global_content_bias)
data, scores = self.merged_attention(batch_size, out_len, mask, q_content, k_content, q_pos, v,
pos_offset, need_weights=need_weights)
if need_weights:
return data, scores
else:
return data
def reset_parameters(self):
torch.nn.init.xavier_uniform_(self.data_to_q.weight)
torch.nn.init.xavier_uniform_(self.pos_to_pq.weight)
torch.nn.init.xavier_uniform_(self.data_to_kv.weight[:self.projection_size * self.n_heads])
torch.nn.init.xavier_uniform_(self.data_to_kv.weight[self.projection_size * self.n_heads:])
if self.global_content_bias is not None:
self.global_content_bias.data.fill_(0)
class DirectionSensitiveGeometricAttentionMyInit(DirectionSensitiveGeometricAttention):
def xavier_manual_(self, tensor: torch.Tensor, fan_in: int, fan_out: int, gain: float = 1) -> torch.Tensor:
std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
return torch.nn.init._no_grad_uniform_(tensor, -a, a)
def reset_parameters(self):
self.xavier_manual_(self.data_to_q.weight, self.state_size, self.projection_size)
self.xavier_manual_(self.pos_to_pq.weight, self.state_size, 2)
self.xavier_manual_(self.data_to_kv.weight, self.state_size, self.projection_size)
self.xavier_manual_(self.multi_head_merge.weight, self.projection_size, self.state_size)
if self.global_content_bias is not None:
self.global_content_bias.data.fill_(0)
| en | 0.840847 | # content-content addressing # directionality. Do scaling here, less flops. # data [batch * n_heads, len, c] # bias [n_heads, c] # Scores shape: [n_batch * n_heads, n_out, n_in] # curr_state: [batch_size, out_len, c] # attend_to: [batch_size, in_len, c] # Calculate uniform bounds from standard deviation | 1.954435 | 2 |
Estudos/collections_dict.py | Gbrvi/Python | 0 | 6612305 | <reponame>Gbrvi/Python
# Dict comprehension
a = {chave:valor for chave, valor in zip([1, 2, 3, 4, 5],
[6, 7, 8, 9, 10])} # Concatena valores. Com lista não daria certo
print(a)
| # Dict comprehension
a = {chave:valor for chave, valor in zip([1, 2, 3, 4, 5],
[6, 7, 8, 9, 10])} # Concatena valores. Com lista não daria certo
print(a) | pt | 0.952102 | # Dict comprehension # Concatena valores. Com lista não daria certo | 3.763192 | 4 |
tasks/task_14.py | AlexRogalskiy/Python | 0 | 6612306 | '''This is my first solution on Coderbyte'''
def FirstReverse(str):
# code goes here
ret_str = ''
for l in range(len(str)-1,-1,-1):
ret_str += str[l]
return ret_str
# keep this function call here
# to see how to enter arguments in Python scroll down
print FirstReverse(raw_input())
| '''This is my first solution on Coderbyte'''
def FirstReverse(str):
# code goes here
ret_str = ''
for l in range(len(str)-1,-1,-1):
ret_str += str[l]
return ret_str
# keep this function call here
# to see how to enter arguments in Python scroll down
print FirstReverse(raw_input())
| en | 0.810923 | This is my first solution on Coderbyte # code goes here # keep this function call here # to see how to enter arguments in Python scroll down | 3.792521 | 4 |
locations.py | jonahmajumder/bookmarker | 0 | 6612307 | <filename>locations.py<gh_stars>0
import sys
from pathlib import Path
from datetime import datetime
IS_BUNDLED = getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS')
LOGFILE = 'log.txt'
def log_header():
header = '\n'
header += 'Log file initiated at {}.\n'.format(datetime.now().isoformat())
header += 50 * '-'
header += '\n\n'
return header
if IS_BUNDLED:
RELATIVE_PATH = Path(sys._MEIPASS).parent / 'Resources'
else:
RELATIVE_PATH = Path(__file__).parents[0]
# in Resource dir within app bundle
def ResourceFile(path):
return str(Path.cwd() / RELATIVE_PATH / path)
HOME = str(Path.home())
DOCUMENTS = str(Path.home() / 'Documents')
if IS_BUNDLED:
# set up app to write to logfile
with open(ResourceFile(LOGFILE), 'a') as file:
file.write(log_header())
sys.stdout = open(ResourceFile(LOGFILE), 'a')
sys.stderr = open(ResourceFile(LOGFILE), 'a') | <filename>locations.py<gh_stars>0
import sys
from pathlib import Path
from datetime import datetime
IS_BUNDLED = getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS')
LOGFILE = 'log.txt'
def log_header():
header = '\n'
header += 'Log file initiated at {}.\n'.format(datetime.now().isoformat())
header += 50 * '-'
header += '\n\n'
return header
if IS_BUNDLED:
RELATIVE_PATH = Path(sys._MEIPASS).parent / 'Resources'
else:
RELATIVE_PATH = Path(__file__).parents[0]
# in Resource dir within app bundle
def ResourceFile(path):
return str(Path.cwd() / RELATIVE_PATH / path)
HOME = str(Path.home())
DOCUMENTS = str(Path.home() / 'Documents')
if IS_BUNDLED:
# set up app to write to logfile
with open(ResourceFile(LOGFILE), 'a') as file:
file.write(log_header())
sys.stdout = open(ResourceFile(LOGFILE), 'a')
sys.stderr = open(ResourceFile(LOGFILE), 'a') | en | 0.910698 | # in Resource dir within app bundle # set up app to write to logfile | 2.306227 | 2 |
timemachine/potentials/jax_utils.py | schmolly/timemachine | 0 | 6612308 | <gh_stars>0
import jax.numpy as np
def convert_to_4d(x3, lamb, lambda_plane_idxs, lambda_offset_idxs, cutoff):
# (ytz): this initializes the 4th dimension to a fixed plane adjust by an offset
# followed by a scaling by cutoff.
# lambda_plane_idxs are typically 0 or 1 and allows us to turn off an interaction
# independent of the lambda value.
# lambda_offset_idxs are typically 0 and 1, and allows us to adjust the w coordinate
# in a lambda-dependent way.
d4 = cutoff*(lambda_plane_idxs + lambda_offset_idxs*lamb)
d4 = np.expand_dims(d4, axis=-1)
x4 = np.concatenate((x3, d4), axis=1)
return x4
def rescale_coordinates(
conf,
indices,
box,
scales):
mol_sizes = np.expand_dims(onp.bincount(indices), axis=1)
mol_centers = jax.ops.segment_sum(coords, indices)/mol_sizes
new_centers = mol_centers - box[2]*np.floor(np.expand_dims(mol_centers[...,2], axis=-1)/box[2][2])
new_centers -= box[1]*np.floor(np.expand_dims(new_centers[...,1], axis=-1)/box[1][1])
new_centers -= box[0]*np.floor(np.expand_dims(new_centers[...,0], axis=-1)/box[0][0])
offset = new_centers - mol_centers
return conf + offset[indices]
def delta_r(ri, rj, box=None):
diff = ri - rj # this can be either N,N,3 or B,3
dims = ri.shape[-1]
# box is None for harmonic bonds, not None for nonbonded terms
if box is not None:
for d in range(dims):
diff -= box[d]*np.floor(np.expand_dims(diff[...,d], axis=-1)/box[d][d]+0.5)
return diff
# def distance(ri, rj, box=None, gij=None):
# # assert box is None
# if gij is not None:
# deltas_4d = np.power(ri - rj, 2)
# # print(deltas_4d.shape)
# deltas_3d = deltas_4d[..., :3]
# # print(deltas_3d.shape)
# dij_4d = np.sqrt(np.sum(deltas_4d, axis=-1))
# dij_3d = np.sqrt(np.sum(deltas_3d, axis=-1))
# # print("shapes", gij.shape, dij_3d.shape, dij_4d.shape)
# dij = np.where(gij, dij_3d, dij_4d)
# else:
# deltas = np.power(ri - rj, 2)
# dij = np.sqrt(np.sum(deltas, axis=-1))
# # print(dij)
# return dij
def distance(ri, rj, box=None):
dxdydz = np.power(delta_r(ri, rj, box), 2)
# np.linalg.norm nans but this doesn't
dij = np.sqrt(np.sum(dxdydz, axis=-1))
return dij
| import jax.numpy as np
def convert_to_4d(x3, lamb, lambda_plane_idxs, lambda_offset_idxs, cutoff):
# (ytz): this initializes the 4th dimension to a fixed plane adjust by an offset
# followed by a scaling by cutoff.
# lambda_plane_idxs are typically 0 or 1 and allows us to turn off an interaction
# independent of the lambda value.
# lambda_offset_idxs are typically 0 and 1, and allows us to adjust the w coordinate
# in a lambda-dependent way.
d4 = cutoff*(lambda_plane_idxs + lambda_offset_idxs*lamb)
d4 = np.expand_dims(d4, axis=-1)
x4 = np.concatenate((x3, d4), axis=1)
return x4
def rescale_coordinates(
conf,
indices,
box,
scales):
mol_sizes = np.expand_dims(onp.bincount(indices), axis=1)
mol_centers = jax.ops.segment_sum(coords, indices)/mol_sizes
new_centers = mol_centers - box[2]*np.floor(np.expand_dims(mol_centers[...,2], axis=-1)/box[2][2])
new_centers -= box[1]*np.floor(np.expand_dims(new_centers[...,1], axis=-1)/box[1][1])
new_centers -= box[0]*np.floor(np.expand_dims(new_centers[...,0], axis=-1)/box[0][0])
offset = new_centers - mol_centers
return conf + offset[indices]
def delta_r(ri, rj, box=None):
diff = ri - rj # this can be either N,N,3 or B,3
dims = ri.shape[-1]
# box is None for harmonic bonds, not None for nonbonded terms
if box is not None:
for d in range(dims):
diff -= box[d]*np.floor(np.expand_dims(diff[...,d], axis=-1)/box[d][d]+0.5)
return diff
# def distance(ri, rj, box=None, gij=None):
# # assert box is None
# if gij is not None:
# deltas_4d = np.power(ri - rj, 2)
# # print(deltas_4d.shape)
# deltas_3d = deltas_4d[..., :3]
# # print(deltas_3d.shape)
# dij_4d = np.sqrt(np.sum(deltas_4d, axis=-1))
# dij_3d = np.sqrt(np.sum(deltas_3d, axis=-1))
# # print("shapes", gij.shape, dij_3d.shape, dij_4d.shape)
# dij = np.where(gij, dij_3d, dij_4d)
# else:
# deltas = np.power(ri - rj, 2)
# dij = np.sqrt(np.sum(deltas, axis=-1))
# # print(dij)
# return dij
def distance(ri, rj, box=None):
dxdydz = np.power(delta_r(ri, rj, box), 2)
# np.linalg.norm nans but this doesn't
dij = np.sqrt(np.sum(dxdydz, axis=-1))
return dij | en | 0.655009 | # (ytz): this initializes the 4th dimension to a fixed plane adjust by an offset # followed by a scaling by cutoff. # lambda_plane_idxs are typically 0 or 1 and allows us to turn off an interaction # independent of the lambda value. # lambda_offset_idxs are typically 0 and 1, and allows us to adjust the w coordinate # in a lambda-dependent way. # this can be either N,N,3 or B,3 # box is None for harmonic bonds, not None for nonbonded terms # def distance(ri, rj, box=None, gij=None): # # assert box is None # if gij is not None: # deltas_4d = np.power(ri - rj, 2) # # print(deltas_4d.shape) # deltas_3d = deltas_4d[..., :3] # # print(deltas_3d.shape) # dij_4d = np.sqrt(np.sum(deltas_4d, axis=-1)) # dij_3d = np.sqrt(np.sum(deltas_3d, axis=-1)) # # print("shapes", gij.shape, dij_3d.shape, dij_4d.shape) # dij = np.where(gij, dij_3d, dij_4d) # else: # deltas = np.power(ri - rj, 2) # dij = np.sqrt(np.sum(deltas, axis=-1)) # # print(dij) # return dij # np.linalg.norm nans but this doesn't | 2.222795 | 2 |
QUANTAXIS/QASU/update_tushare.py | 5267/QUANTAXIS | 5 | 6612309 | <gh_stars>1-10
# coding:utf-8
from QUANTAXIS.QAFetch import QATushare
from QUANTAXIS.QAUtil import QA_util_date_stamp,QA_Setting,QA_util_date_valid
import json
import pymongo
import datetime
import re
import time
def QA_update_stock_day(name,startDate,endDate):
data=QATushare.QA_fetch_get_stock_day(name,startDate,endDate)
def QA_update_stock_day_all(code,client):
coll_stocklist=client.quantaxis.stock_list
stock_list=coll_stocklist.find_one()['stock']['code']
coll_stock_day=client.quantaxis.stock_day
for item in stock_list:
#coll.find({'code':str(item)[0:6]}).count()
#先拿到最后一个记录的交易日期
start_date=coll_stock_day.find({'code':str(item)[0:6]})[coll_stock_day.find({'code':str(item)[0:6]}).count()-1]['date']
end_date=str(datetime.date.today())
data=QATushare.QA_fetch_get_stock_day(str(item)[0:6],start_date,end_date)[1::]
coll_stock_day.insert_many(data)
def QA_update_standard_sql():
print('正在整理和更新数据,请稍等.....')
coll=pymongo.MongoClient().quantaxis.stock_day
coll.ensure_index('code')
"""
for item in coll.find():
date=item['date']
date_stamp=QA_util_date_stamp(date)
coll.update({"_id":item['_id']},{'$set':{'date_stamp':date_stamp}})
"""
| # coding:utf-8
from QUANTAXIS.QAFetch import QATushare
from QUANTAXIS.QAUtil import QA_util_date_stamp,QA_Setting,QA_util_date_valid
import json
import pymongo
import datetime
import re
import time
def QA_update_stock_day(name, startDate, endDate):
    """Fetch daily bar data for one stock from tushare and return it.

    Bug fix: the original implementation fetched the data and silently
    discarded it (no return, no side effect), making the function a no-op.

    :param name: stock code, e.g. ``'000001'``
    :param startDate: start date string ``'YYYY-MM-DD'``
    :param endDate: end date string ``'YYYY-MM-DD'``
    :return: the value returned by ``QATushare.QA_fetch_get_stock_day``
    """
    data = QATushare.QA_fetch_get_stock_day(name, startDate, endDate)
    return data
def QA_update_stock_day_all(code, client):
    """Incrementally update daily bars for every stock in the stock list.

    For each stock the last stored trade date is looked up, then the range
    (last stored date .. today] is fetched from tushare and appended to the
    ``stock_day`` collection.

    :param code: unused; kept for backward compatibility with existing callers
    :param client: a ``pymongo.MongoClient`` connected to the quantaxis db
    """
    coll_stocklist = client.quantaxis.stock_list
    stock_list = coll_stocklist.find_one()['stock']['code']
    coll_stock_day = client.quantaxis.stock_day
    end_date = str(datetime.date.today())  # loop-invariant; compute once
    for item in stock_list:
        code_str = str(item)[0:6]
        # 先拿到最后一个记录的交易日期 -- last recorded trade date for this
        # stock.  One indexed query sorted by date descending (-1) replaces
        # the original double find() + deprecated cursor .count() scan.
        last_record = coll_stock_day.find_one(
            {'code': code_str}, sort=[('date', -1)])
        start_date = last_record['date']
        # The first fetched row duplicates the stored last day, so skip it.
        data = QATushare.QA_fetch_get_stock_day(code_str, start_date, end_date)[1::]
        # insert_many() raises on an empty list, which happens whenever a
        # stock is already up to date -- guard against it.
        if data:
            coll_stock_day.insert_many(data)
def QA_update_standard_sql():
    """Ensure the ``code`` index used by the incremental updaters exists.

    Connects to the local MongoDB instance and (re)creates the index on the
    ``code`` field of ``quantaxis.stock_day``.
    """
    print('正在整理和更新数据,请稍等.....')
    coll = pymongo.MongoClient().quantaxis.stock_day
    # ensure_index() was deprecated in PyMongo 2.x and removed in 3.0;
    # create_index() is idempotent and is the supported replacement.
    coll.create_index('code')
src/Chem_processing.py | deepsystemspharmacology/drug_combination | 3 | 6612310 | import pubchempy as pcp
import logging
from src import setting
# Set up a module-level logger that appends to the run-specific log file
# configured in src.setting.
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(name)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S')
fh = logging.FileHandler(setting.run_specific_log, mode='a')  # 'a': append across runs
fh.setFormatter(fmt=formatter)
logger = logging.getLogger("Processing chemicals")
logger.addHandler(fh)
logger.setLevel(logging.DEBUG)  # capture everything; the handler formats it
def smile2ichikey(smile):
    """Resolve a SMILES string to its InChIKey via the PubChem API.

    :param smile: SMILES string for the compound
    :return: the InChIKey when PubChem returns a single match, a list of
        InChIKeys when it returns several, or ``None`` on lookup failure.
    """
    try:
        compounds = pcp.get_compounds(smile, namespace='smiles')
        if len(compounds) == 1:
            return compounds[0].inchikey
        # Using the module's configured logger instead of the root logger,
        # and a narrowed exception clause instead of a bare ``except:``.
        logger.info("Found more than one inchikey")
        return [x.inchikey for x in compounds]
    except Exception:
        # Network / PubChem errors are treated as "not resolvable", but are
        # now logged (with traceback) instead of being silently swallowed.
        logger.exception("PubChem lookup failed for smiles %s", smile)
        return None
def smile2ichi(smile):
    """Resolve a SMILES string to its InChI via the PubChem API.

    :param smile: SMILES string for the compound
    :return: the InChI when PubChem returns a single match, a list of
        InChIs when it returns several, or ``None`` on lookup failure.
    """
    try:
        compounds = pcp.get_compounds(smile, namespace='smiles')
        if len(compounds) == 1:
            return compounds[0].inchi
        # Bug fix: the multi-compound branch returned ``x.inchikey`` (and
        # logged "inchikey") in the InChI function -- a copy-paste error
        # from smile2ichikey.
        logger.info("Found more than one inchi")
        return [x.inchi for x in compounds]
    except Exception:
        # Narrowed from a bare ``except:`` and logged instead of silent.
        logger.exception("PubChem lookup failed for smiles %s", smile)
        return None
| import pubchempy as pcp
import logging
from src import setting
# Set up a module-level logger that appends to the run-specific log file
# configured in src.setting.
formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(name)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S')
fh = logging.FileHandler(setting.run_specific_log, mode='a')  # 'a': append across runs
fh.setFormatter(fmt=formatter)
logger = logging.getLogger("Processing chemicals")
logger.addHandler(fh)
logger.setLevel(logging.DEBUG)  # capture everything; the handler formats it
def smile2ichikey(smile):
    """Resolve a SMILES string to its InChIKey via the PubChem API.

    :param smile: SMILES string for the compound
    :return: the InChIKey when PubChem returns a single match, a list of
        InChIKeys when it returns several, or ``None`` on lookup failure.
    """
    try:
        compounds = pcp.get_compounds(smile, namespace='smiles')
        if len(compounds) == 1:
            return compounds[0].inchikey
        # Using the module's configured logger instead of the root logger,
        # and a narrowed exception clause instead of a bare ``except:``.
        logger.info("Found more than one inchikey")
        return [x.inchikey for x in compounds]
    except Exception:
        # Network / PubChem errors are treated as "not resolvable", but are
        # now logged (with traceback) instead of being silently swallowed.
        logger.exception("PubChem lookup failed for smiles %s", smile)
        return None
def smile2ichi(smile):
    """Resolve a SMILES string to its InChI via the PubChem API.

    :param smile: SMILES string for the compound
    :return: the InChI when PubChem returns a single match, a list of
        InChIs when it returns several, or ``None`` on lookup failure.
    """
    try:
        compounds = pcp.get_compounds(smile, namespace='smiles')
        if len(compounds) == 1:
            return compounds[0].inchi
        # Bug fix: the multi-compound branch returned ``x.inchikey`` (and
        # logged "inchikey") in the InChI function -- a copy-paste error
        # from smile2ichikey.
        logger.info("Found more than one inchi")
        return [x.inchi for x in compounds]
    except Exception:
        # Narrowed from a bare ``except:`` and logged instead of silent.
        logger.exception("PubChem lookup failed for smiles %s", smile)
        return None
| en | 0.818635 | # Setting up log file | 2.450607 | 2 |
ml_project_template/api.py | City-of-Helsinki/kawai_demo_tapani | 0 | 6612311 | # AUTOGENERATED! DO NOT EDIT! File to edit: 04_api.ipynb (unless otherwise specified).
__all__ = []  # nbdev convention: exported names are appended here per cell

# Cell
# your code here

# Cell
# your code here
__all__ = []  # nbdev convention: exported names are appended here per cell

# Cell
# your code here

# Cell
# your code here
sdk/python/pulumi_signalfx/azure/integration.py | pulumi/pulumi-signalfx | 2 | 6612312 | <filename>sdk/python/pulumi_signalfx/azure/integration.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['IntegrationArgs', 'Integration']
# NOTE(review): tfgen-generated resource-args class (see the "DO NOT EDIT"
# header at the top of the file); only comments/docstrings are touched here
# so the file can be cleanly regenerated.
@pulumi.input_type
class IntegrationArgs:
    def __init__(__self__, *,
                 app_id: pulumi.Input[str],
                 enabled: pulumi.Input[bool],
                 secret_key: pulumi.Input[str],
                 services: pulumi.Input[Sequence[pulumi.Input[str]]],
                 subscriptions: pulumi.Input[Sequence[pulumi.Input[str]]],
                 tenant_id: pulumi.Input[str],
                 custom_namespaces_per_services: Optional[pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]]] = None,
                 environment: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 named_token: Optional[pulumi.Input[str]] = None,
                 poll_rate: Optional[pulumi.Input[int]] = None,
                 sync_guest_os_namespaces: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing an Integration resource.
        :param pulumi.Input[str] app_id: Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation.
        :param pulumi.Input[bool] enabled: Whether the integration is enabled.
        :param pulumi.Input[str] secret_key: Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] services: List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valid values.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subscriptions: List of Azure subscriptions that SignalFx should monitor.
        :param pulumi.Input[str] tenant_id: Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        :param pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]] custom_namespaces_per_services: Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces.
        :param pulumi.Input[str] environment: What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`.
        :param pulumi.Input[str] name: Name of the integration.
        :param pulumi.Input[str] named_token: A named token to use for ingest
        :param pulumi.Input[int] poll_rate: Poll rate (in seconds). One of `60` or `300`. (Upstream docs say "AWS poll rate" -- a generator artifact.)
        :param pulumi.Input[bool] sync_guest_os_namespaces: If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false.
        """
        # Required arguments are set unconditionally.
        pulumi.set(__self__, "app_id", app_id)
        pulumi.set(__self__, "enabled", enabled)
        pulumi.set(__self__, "secret_key", secret_key)
        pulumi.set(__self__, "services", services)
        pulumi.set(__self__, "subscriptions", subscriptions)
        pulumi.set(__self__, "tenant_id", tenant_id)
        # Optional arguments are only set when provided so provider defaults apply.
        if custom_namespaces_per_services is not None:
            pulumi.set(__self__, "custom_namespaces_per_services", custom_namespaces_per_services)
        if environment is not None:
            pulumi.set(__self__, "environment", environment)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if named_token is not None:
            pulumi.set(__self__, "named_token", named_token)
        if poll_rate is not None:
            pulumi.set(__self__, "poll_rate", poll_rate)
        if sync_guest_os_namespaces is not None:
            pulumi.set(__self__, "sync_guest_os_namespaces", sync_guest_os_namespaces)

    @property
    @pulumi.getter(name="appId")
    def app_id(self) -> pulumi.Input[str]:
        """
        Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation.
        """
        return pulumi.get(self, "app_id")

    @app_id.setter
    def app_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "app_id", value)

    @property
    @pulumi.getter
    def enabled(self) -> pulumi.Input[bool]:
        """
        Whether the integration is enabled.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: pulumi.Input[bool]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="secretKey")
    def secret_key(self) -> pulumi.Input[str]:
        """
        Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        """
        return pulumi.get(self, "secret_key")

    @secret_key.setter
    def secret_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "secret_key", value)

    @property
    @pulumi.getter
    def services(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valid values.
        """
        return pulumi.get(self, "services")

    @services.setter
    def services(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "services", value)

    @property
    @pulumi.getter
    def subscriptions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        List of Azure subscriptions that SignalFx should monitor.
        """
        return pulumi.get(self, "subscriptions")

    @subscriptions.setter
    def subscriptions(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "subscriptions", value)

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> pulumi.Input[str]:
        """
        Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        """
        return pulumi.get(self, "tenant_id")

    @tenant_id.setter
    def tenant_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "tenant_id", value)

    @property
    @pulumi.getter(name="customNamespacesPerServices")
    def custom_namespaces_per_services(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]]]:
        """
        Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces.
        """
        return pulumi.get(self, "custom_namespaces_per_services")

    @custom_namespaces_per_services.setter
    def custom_namespaces_per_services(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]]]):
        pulumi.set(self, "custom_namespaces_per_services", value)

    @property
    @pulumi.getter
    def environment(self) -> Optional[pulumi.Input[str]]:
        """
        What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`.
        """
        return pulumi.get(self, "environment")

    @environment.setter
    def environment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "environment", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the integration.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="namedToken")
    def named_token(self) -> Optional[pulumi.Input[str]]:
        """
        A named token to use for ingest
        """
        return pulumi.get(self, "named_token")

    @named_token.setter
    def named_token(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "named_token", value)

    @property
    @pulumi.getter(name="pollRate")
    def poll_rate(self) -> Optional[pulumi.Input[int]]:
        """
        Poll rate (in seconds). One of `60` or `300`.
        """
        return pulumi.get(self, "poll_rate")

    @poll_rate.setter
    def poll_rate(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "poll_rate", value)

    @property
    @pulumi.getter(name="syncGuestOsNamespaces")
    def sync_guest_os_namespaces(self) -> Optional[pulumi.Input[bool]]:
        """
        If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false.
        """
        return pulumi.get(self, "sync_guest_os_namespaces")

    @sync_guest_os_namespaces.setter
    def sync_guest_os_namespaces(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "sync_guest_os_namespaces", value)
# NOTE(review): tfgen-generated state class used for Integration.get() lookups;
# every field is optional here, unlike IntegrationArgs.  Only comments and
# docstrings are touched so the file can be cleanly regenerated.
@pulumi.input_type
class _IntegrationState:
    def __init__(__self__, *,
                 app_id: Optional[pulumi.Input[str]] = None,
                 custom_namespaces_per_services: Optional[pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 environment: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 named_token: Optional[pulumi.Input[str]] = None,
                 poll_rate: Optional[pulumi.Input[int]] = None,
                 secret_key: Optional[pulumi.Input[str]] = None,
                 services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 sync_guest_os_namespaces: Optional[pulumi.Input[bool]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Integration resources.
        :param pulumi.Input[str] app_id: Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation.
        :param pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]] custom_namespaces_per_services: Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces.
        :param pulumi.Input[bool] enabled: Whether the integration is enabled.
        :param pulumi.Input[str] environment: What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`.
        :param pulumi.Input[str] name: Name of the integration.
        :param pulumi.Input[str] named_token: A named token to use for ingest
        :param pulumi.Input[int] poll_rate: Poll rate (in seconds). One of `60` or `300`.
        :param pulumi.Input[str] secret_key: Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] services: List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valid values.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subscriptions: List of Azure subscriptions that SignalFx should monitor.
        :param pulumi.Input[bool] sync_guest_os_namespaces: If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false.
        :param pulumi.Input[str] tenant_id: Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        """
        # Every property is optional for state lookups; set only what was given.
        if app_id is not None:
            pulumi.set(__self__, "app_id", app_id)
        if custom_namespaces_per_services is not None:
            pulumi.set(__self__, "custom_namespaces_per_services", custom_namespaces_per_services)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if environment is not None:
            pulumi.set(__self__, "environment", environment)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if named_token is not None:
            pulumi.set(__self__, "named_token", named_token)
        if poll_rate is not None:
            pulumi.set(__self__, "poll_rate", poll_rate)
        if secret_key is not None:
            pulumi.set(__self__, "secret_key", secret_key)
        if services is not None:
            pulumi.set(__self__, "services", services)
        if subscriptions is not None:
            pulumi.set(__self__, "subscriptions", subscriptions)
        if sync_guest_os_namespaces is not None:
            pulumi.set(__self__, "sync_guest_os_namespaces", sync_guest_os_namespaces)
        if tenant_id is not None:
            pulumi.set(__self__, "tenant_id", tenant_id)

    @property
    @pulumi.getter(name="appId")
    def app_id(self) -> Optional[pulumi.Input[str]]:
        """
        Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation.
        """
        return pulumi.get(self, "app_id")

    @app_id.setter
    def app_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "app_id", value)

    @property
    @pulumi.getter(name="customNamespacesPerServices")
    def custom_namespaces_per_services(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]]]:
        """
        Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces.
        """
        return pulumi.get(self, "custom_namespaces_per_services")

    @custom_namespaces_per_services.setter
    def custom_namespaces_per_services(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]]]):
        pulumi.set(self, "custom_namespaces_per_services", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the integration is enabled.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter
    def environment(self) -> Optional[pulumi.Input[str]]:
        """
        What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`.
        """
        return pulumi.get(self, "environment")

    @environment.setter
    def environment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "environment", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the integration.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="namedToken")
    def named_token(self) -> Optional[pulumi.Input[str]]:
        """
        A named token to use for ingest
        """
        return pulumi.get(self, "named_token")

    @named_token.setter
    def named_token(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "named_token", value)

    @property
    @pulumi.getter(name="pollRate")
    def poll_rate(self) -> Optional[pulumi.Input[int]]:
        """
        Poll rate (in seconds). One of `60` or `300`.
        """
        return pulumi.get(self, "poll_rate")

    @poll_rate.setter
    def poll_rate(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "poll_rate", value)

    @property
    @pulumi.getter(name="secretKey")
    def secret_key(self) -> Optional[pulumi.Input[str]]:
        """
        Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        """
        return pulumi.get(self, "secret_key")

    @secret_key.setter
    def secret_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secret_key", value)

    @property
    @pulumi.getter
    def services(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valid values.
        """
        return pulumi.get(self, "services")

    @services.setter
    def services(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "services", value)

    @property
    @pulumi.getter
    def subscriptions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of Azure subscriptions that SignalFx should monitor.
        """
        return pulumi.get(self, "subscriptions")

    @subscriptions.setter
    def subscriptions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "subscriptions", value)

    @property
    @pulumi.getter(name="syncGuestOsNamespaces")
    def sync_guest_os_namespaces(self) -> Optional[pulumi.Input[bool]]:
        """
        If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false.
        """
        return pulumi.get(self, "sync_guest_os_namespaces")

    @sync_guest_os_namespaces.setter
    def sync_guest_os_namespaces(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "sync_guest_os_namespaces", value)

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[pulumi.Input[str]]:
        """
        Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        """
        return pulumi.get(self, "tenant_id")

    @tenant_id.setter
    def tenant_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenant_id", value)
class Integration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_id: Optional[pulumi.Input[str]] = None,
custom_namespaces_per_services: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IntegrationCustomNamespacesPerServiceArgs']]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
environment: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
named_token: Optional[pulumi.Input[str]] = None,
poll_rate: Optional[pulumi.Input[int]] = None,
secret_key: Optional[pulumi.Input[str]] = None,
services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
sync_guest_os_namespaces: Optional[pulumi.Input[bool]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
SignalFx Azure integrations. For help with this integration see [Monitoring Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure).
> **NOTE** When managing integrations you'll need to use an admin token to authenticate the SignalFx provider. Otherwise you'll receive a 4xx error.
## Example Usage
```python
import pulumi
import pulumi_signalfx as signalfx
azure_myteam = signalfx.azure.Integration("azureMyteam",
app_id="YYY",
custom_namespaces_per_services=[signalfx.azure.IntegrationCustomNamespacesPerServiceArgs(
namespaces=[
"monitoringAgent",
"customNamespace",
],
service="Microsoft.Compute/virtualMachines",
)],
enabled=True,
environment="azure",
poll_rate=300,
secret_key="XXX",
services=["microsoft.sql/servers/elasticpools"],
subscriptions=["sub-guid-here"],
tenant_id="ZZZ")
```
## Service Names
> **NOTE** You can use the data source "azure.getServices" to specify all services.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] app_id: Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IntegrationCustomNamespacesPerServiceArgs']]]] custom_namespaces_per_services: Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces.
:param pulumi.Input[bool] enabled: Whether the integration is enabled.
:param pulumi.Input[str] environment: What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`.
:param pulumi.Input[str] name: Name of the integration.
:param pulumi.Input[str] named_token: A named token to use for ingest
:param pulumi.Input[int] poll_rate: AWS poll rate (in seconds). One of `60` or `300`.
:param pulumi.Input[str] secret_key: Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
:param pulumi.Input[Sequence[pulumi.Input[str]]] services: List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valida values.
:param pulumi.Input[Sequence[pulumi.Input[str]]] subscriptions: List of Azure subscriptions that SignalFx should monitor.
:param pulumi.Input[bool] sync_guest_os_namespaces: If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false.
:param pulumi.Input[str] tenant_id: Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IntegrationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
SignalFx Azure integrations. For help with this integration see [Monitoring Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure).
> **NOTE** When managing integrations you'll need to use an admin token to authenticate the SignalFx provider. Otherwise you'll receive a 4xx error.
## Example Usage
```python
import pulumi
import pulumi_signalfx as signalfx
azure_myteam = signalfx.azure.Integration("azureMyteam",
app_id="YYY",
custom_namespaces_per_services=[signalfx.azure.IntegrationCustomNamespacesPerServiceArgs(
namespaces=[
"monitoringAgent",
"customNamespace",
],
service="Microsoft.Compute/virtualMachines",
)],
enabled=True,
environment="azure",
poll_rate=300,
secret_key="XXX",
services=["microsoft.sql/servers/elasticpools"],
subscriptions=["sub-guid-here"],
tenant_id="ZZZ")
```
## Service Names
> **NOTE** You can use the data source "azure.getServices" to specify all services.
:param str resource_name: The name of the resource.
:param IntegrationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IntegrationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
app_id: Optional[pulumi.Input[str]] = None,
custom_namespaces_per_services: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IntegrationCustomNamespacesPerServiceArgs']]]]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
environment: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
named_token: Optional[pulumi.Input[str]] = None,
poll_rate: Optional[pulumi.Input[int]] = None,
secret_key: Optional[pulumi.Input[str]] = None,
services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
sync_guest_os_namespaces: Optional[pulumi.Input[bool]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IntegrationArgs.__new__(IntegrationArgs)
if app_id is None and not opts.urn:
raise TypeError("Missing required property 'app_id'")
__props__.__dict__["app_id"] = app_id
__props__.__dict__["custom_namespaces_per_services"] = custom_namespaces_per_services
if enabled is None and not opts.urn:
raise TypeError("Missing required property 'enabled'")
__props__.__dict__["enabled"] = enabled
__props__.__dict__["environment"] = environment
__props__.__dict__["name"] = name
__props__.__dict__["named_token"] = named_token
__props__.__dict__["poll_rate"] = poll_rate
if secret_key is None and not opts.urn:
raise TypeError("Missing required property 'secret_key'")
__props__.__dict__["secret_key"] = secret_key
if services is None and not opts.urn:
raise TypeError("Missing required property 'services'")
__props__.__dict__["services"] = services
if subscriptions is None and not opts.urn:
raise TypeError("Missing required property 'subscriptions'")
__props__.__dict__["subscriptions"] = subscriptions
__props__.__dict__["sync_guest_os_namespaces"] = sync_guest_os_namespaces
if tenant_id is None and not opts.urn:
raise TypeError("Missing required property 'tenant_id'")
__props__.__dict__["tenant_id"] = tenant_id
super(Integration, __self__).__init__(
'signalfx:azure/integration:Integration',
resource_name,
__props__,
opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            app_id: Optional[pulumi.Input[str]] = None,
            custom_namespaces_per_services: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IntegrationCustomNamespacesPerServiceArgs']]]]] = None,
            enabled: Optional[pulumi.Input[bool]] = None,
            environment: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            named_token: Optional[pulumi.Input[str]] = None,
            poll_rate: Optional[pulumi.Input[int]] = None,
            secret_key: Optional[pulumi.Input[str]] = None,
            services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            sync_guest_os_namespaces: Optional[pulumi.Input[bool]] = None,
            tenant_id: Optional[pulumi.Input[str]] = None) -> 'Integration':
        """
        Get an existing Integration resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] app_id: Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IntegrationCustomNamespacesPerServiceArgs']]]] custom_namespaces_per_services: Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces.
        :param pulumi.Input[bool] enabled: Whether the integration is enabled.
        :param pulumi.Input[str] environment: What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`.
        :param pulumi.Input[str] name: Name of the integration.
        :param pulumi.Input[str] named_token: A named token to use for ingest
        :param pulumi.Input[int] poll_rate: Azure poll rate (in seconds). One of `60` or `300`.
        :param pulumi.Input[str] secret_key: Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] services: List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valid values.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subscriptions: List of Azure subscriptions that SignalFx should monitor.
        :param pulumi.Input[bool] sync_guest_os_namespaces: If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false.
        :param pulumi.Input[str] tenant_id: Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _IntegrationState.__new__(_IntegrationState)
        __props__.__dict__["app_id"] = app_id
        __props__.__dict__["custom_namespaces_per_services"] = custom_namespaces_per_services
        __props__.__dict__["enabled"] = enabled
        __props__.__dict__["environment"] = environment
        __props__.__dict__["name"] = name
        __props__.__dict__["named_token"] = named_token
        __props__.__dict__["poll_rate"] = poll_rate
        __props__.__dict__["secret_key"] = secret_key
        __props__.__dict__["services"] = services
        __props__.__dict__["subscriptions"] = subscriptions
        __props__.__dict__["sync_guest_os_namespaces"] = sync_guest_os_namespaces
        __props__.__dict__["tenant_id"] = tenant_id
        return Integration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="appId")
def app_id(self) -> pulumi.Output[str]:
"""
Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation.
"""
return pulumi.get(self, "app_id")
@property
@pulumi.getter(name="customNamespacesPerServices")
def custom_namespaces_per_services(self) -> pulumi.Output[Optional[Sequence['outputs.IntegrationCustomNamespacesPerService']]]:
"""
Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces.
"""
return pulumi.get(self, "custom_namespaces_per_services")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[bool]:
"""
Whether the integration is enabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def environment(self) -> pulumi.Output[Optional[str]]:
"""
What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`.
"""
return pulumi.get(self, "environment")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the integration.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="namedToken")
def named_token(self) -> pulumi.Output[Optional[str]]:
"""
A named token to use for ingest
"""
return pulumi.get(self, "named_token")
    @property
    @pulumi.getter(name="pollRate")
    def poll_rate(self) -> pulumi.Output[Optional[int]]:
        """
        Azure poll rate (in seconds). One of `60` or `300`.
        """
        return pulumi.get(self, "poll_rate")
@property
@pulumi.getter(name="secretKey")
def secret_key(self) -> pulumi.Output[str]:
"""
Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
"""
return pulumi.get(self, "secret_key")
    @property
    @pulumi.getter
    def services(self) -> pulumi.Output[Sequence[str]]:
        """
        List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valid values.
        """
        return pulumi.get(self, "services")
@property
@pulumi.getter
def subscriptions(self) -> pulumi.Output[Sequence[str]]:
"""
List of Azure subscriptions that SignalFx should monitor.
"""
return pulumi.get(self, "subscriptions")
@property
@pulumi.getter(name="syncGuestOsNamespaces")
def sync_guest_os_namespaces(self) -> pulumi.Output[Optional[bool]]:
"""
If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false.
"""
return pulumi.get(self, "sync_guest_os_namespaces")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[str]:
"""
Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
"""
return pulumi.get(self, "tenant_id")
# sdk/python/pulumi_signalfx/azure/integration.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['IntegrationArgs', 'Integration']
@pulumi.input_type
class IntegrationArgs:
    def __init__(__self__, *,
                 app_id: pulumi.Input[str],
                 enabled: pulumi.Input[bool],
                 secret_key: pulumi.Input[str],
                 services: pulumi.Input[Sequence[pulumi.Input[str]]],
                 subscriptions: pulumi.Input[Sequence[pulumi.Input[str]]],
                 tenant_id: pulumi.Input[str],
                 custom_namespaces_per_services: Optional[pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]]] = None,
                 environment: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 named_token: Optional[pulumi.Input[str]] = None,
                 poll_rate: Optional[pulumi.Input[int]] = None,
                 sync_guest_os_namespaces: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a Integration resource.
        :param pulumi.Input[str] app_id: Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation.
        :param pulumi.Input[bool] enabled: Whether the integration is enabled.
        :param pulumi.Input[str] secret_key: Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] services: List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valid values.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subscriptions: List of Azure subscriptions that SignalFx should monitor.
        :param pulumi.Input[str] tenant_id: Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        :param pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]] custom_namespaces_per_services: Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces.
        :param pulumi.Input[str] environment: What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`.
        :param pulumi.Input[str] name: Name of the integration.
        :param pulumi.Input[str] named_token: A named token to use for ingest
        :param pulumi.Input[int] poll_rate: Azure poll rate (in seconds). One of `60` or `300`.
        :param pulumi.Input[bool] sync_guest_os_namespaces: If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false.
        """
        # Required arguments are set unconditionally; optional ones only when provided.
        pulumi.set(__self__, "app_id", app_id)
        pulumi.set(__self__, "enabled", enabled)
        pulumi.set(__self__, "secret_key", secret_key)
        pulumi.set(__self__, "services", services)
        pulumi.set(__self__, "subscriptions", subscriptions)
        pulumi.set(__self__, "tenant_id", tenant_id)
        if custom_namespaces_per_services is not None:
            pulumi.set(__self__, "custom_namespaces_per_services", custom_namespaces_per_services)
        if environment is not None:
            pulumi.set(__self__, "environment", environment)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if named_token is not None:
            pulumi.set(__self__, "named_token", named_token)
        if poll_rate is not None:
            pulumi.set(__self__, "poll_rate", poll_rate)
        if sync_guest_os_namespaces is not None:
            pulumi.set(__self__, "sync_guest_os_namespaces", sync_guest_os_namespaces)
    @property
    @pulumi.getter(name="appId")
    def app_id(self) -> pulumi.Input[str]:
        """
        Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation.
        """
        return pulumi.get(self, "app_id")
    @app_id.setter
    def app_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "app_id", value)
    @property
    @pulumi.getter
    def enabled(self) -> pulumi.Input[bool]:
        """
        Whether the integration is enabled.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: pulumi.Input[bool]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter(name="secretKey")
    def secret_key(self) -> pulumi.Input[str]:
        """
        Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        """
        return pulumi.get(self, "secret_key")
    @secret_key.setter
    def secret_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "secret_key", value)
    @property
    @pulumi.getter
    def services(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valid values.
        """
        return pulumi.get(self, "services")
    @services.setter
    def services(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "services", value)
    @property
    @pulumi.getter
    def subscriptions(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
        """
        List of Azure subscriptions that SignalFx should monitor.
        """
        return pulumi.get(self, "subscriptions")
    @subscriptions.setter
    def subscriptions(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
        pulumi.set(self, "subscriptions", value)
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> pulumi.Input[str]:
        """
        Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        """
        return pulumi.get(self, "tenant_id")
    @tenant_id.setter
    def tenant_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "tenant_id", value)
    @property
    @pulumi.getter(name="customNamespacesPerServices")
    def custom_namespaces_per_services(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]]]:
        """
        Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces.
        """
        return pulumi.get(self, "custom_namespaces_per_services")
    @custom_namespaces_per_services.setter
    def custom_namespaces_per_services(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]]]):
        pulumi.set(self, "custom_namespaces_per_services", value)
    @property
    @pulumi.getter
    def environment(self) -> Optional[pulumi.Input[str]]:
        """
        What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`.
        """
        return pulumi.get(self, "environment")
    @environment.setter
    def environment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "environment", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the integration.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="namedToken")
    def named_token(self) -> Optional[pulumi.Input[str]]:
        """
        A named token to use for ingest
        """
        return pulumi.get(self, "named_token")
    @named_token.setter
    def named_token(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "named_token", value)
    @property
    @pulumi.getter(name="pollRate")
    def poll_rate(self) -> Optional[pulumi.Input[int]]:
        """
        Azure poll rate (in seconds). One of `60` or `300`.
        """
        return pulumi.get(self, "poll_rate")
    @poll_rate.setter
    def poll_rate(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "poll_rate", value)
    @property
    @pulumi.getter(name="syncGuestOsNamespaces")
    def sync_guest_os_namespaces(self) -> Optional[pulumi.Input[bool]]:
        """
        If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false.
        """
        return pulumi.get(self, "sync_guest_os_namespaces")
    @sync_guest_os_namespaces.setter
    def sync_guest_os_namespaces(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "sync_guest_os_namespaces", value)
@pulumi.input_type
class _IntegrationState:
    def __init__(__self__, *,
                 app_id: Optional[pulumi.Input[str]] = None,
                 custom_namespaces_per_services: Optional[pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 environment: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 named_token: Optional[pulumi.Input[str]] = None,
                 poll_rate: Optional[pulumi.Input[int]] = None,
                 secret_key: Optional[pulumi.Input[str]] = None,
                 services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 sync_guest_os_namespaces: Optional[pulumi.Input[bool]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Integration resources.
        :param pulumi.Input[str] app_id: Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation.
        :param pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]] custom_namespaces_per_services: Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces.
        :param pulumi.Input[bool] enabled: Whether the integration is enabled.
        :param pulumi.Input[str] environment: What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`.
        :param pulumi.Input[str] name: Name of the integration.
        :param pulumi.Input[str] named_token: A named token to use for ingest
        :param pulumi.Input[int] poll_rate: Azure poll rate (in seconds). One of `60` or `300`.
        :param pulumi.Input[str] secret_key: Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] services: List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valid values.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subscriptions: List of Azure subscriptions that SignalFx should monitor.
        :param pulumi.Input[bool] sync_guest_os_namespaces: If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false.
        :param pulumi.Input[str] tenant_id: Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        """
        # All state fields are optional; set only those actually provided.
        if app_id is not None:
            pulumi.set(__self__, "app_id", app_id)
        if custom_namespaces_per_services is not None:
            pulumi.set(__self__, "custom_namespaces_per_services", custom_namespaces_per_services)
        if enabled is not None:
            pulumi.set(__self__, "enabled", enabled)
        if environment is not None:
            pulumi.set(__self__, "environment", environment)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if named_token is not None:
            pulumi.set(__self__, "named_token", named_token)
        if poll_rate is not None:
            pulumi.set(__self__, "poll_rate", poll_rate)
        if secret_key is not None:
            pulumi.set(__self__, "secret_key", secret_key)
        if services is not None:
            pulumi.set(__self__, "services", services)
        if subscriptions is not None:
            pulumi.set(__self__, "subscriptions", subscriptions)
        if sync_guest_os_namespaces is not None:
            pulumi.set(__self__, "sync_guest_os_namespaces", sync_guest_os_namespaces)
        if tenant_id is not None:
            pulumi.set(__self__, "tenant_id", tenant_id)
    @property
    @pulumi.getter(name="appId")
    def app_id(self) -> Optional[pulumi.Input[str]]:
        """
        Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation.
        """
        return pulumi.get(self, "app_id")
    @app_id.setter
    def app_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "app_id", value)
    @property
    @pulumi.getter(name="customNamespacesPerServices")
    def custom_namespaces_per_services(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]]]:
        """
        Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces.
        """
        return pulumi.get(self, "custom_namespaces_per_services")
    @custom_namespaces_per_services.setter
    def custom_namespaces_per_services(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]]]):
        pulumi.set(self, "custom_namespaces_per_services", value)
    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the integration is enabled.
        """
        return pulumi.get(self, "enabled")
    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)
    @property
    @pulumi.getter
    def environment(self) -> Optional[pulumi.Input[str]]:
        """
        What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`.
        """
        return pulumi.get(self, "environment")
    @environment.setter
    def environment(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "environment", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the integration.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="namedToken")
    def named_token(self) -> Optional[pulumi.Input[str]]:
        """
        A named token to use for ingest
        """
        return pulumi.get(self, "named_token")
    @named_token.setter
    def named_token(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "named_token", value)
    @property
    @pulumi.getter(name="pollRate")
    def poll_rate(self) -> Optional[pulumi.Input[int]]:
        """
        Azure poll rate (in seconds). One of `60` or `300`.
        """
        return pulumi.get(self, "poll_rate")
    @poll_rate.setter
    def poll_rate(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "poll_rate", value)
    @property
    @pulumi.getter(name="secretKey")
    def secret_key(self) -> Optional[pulumi.Input[str]]:
        """
        Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        """
        return pulumi.get(self, "secret_key")
    @secret_key.setter
    def secret_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "secret_key", value)
    @property
    @pulumi.getter
    def services(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valid values.
        """
        return pulumi.get(self, "services")
    @services.setter
    def services(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "services", value)
    @property
    @pulumi.getter
    def subscriptions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of Azure subscriptions that SignalFx should monitor.
        """
        return pulumi.get(self, "subscriptions")
    @subscriptions.setter
    def subscriptions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "subscriptions", value)
    @property
    @pulumi.getter(name="syncGuestOsNamespaces")
    def sync_guest_os_namespaces(self) -> Optional[pulumi.Input[bool]]:
        """
        If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false.
        """
        return pulumi.get(self, "sync_guest_os_namespaces")
    @sync_guest_os_namespaces.setter
    def sync_guest_os_namespaces(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "sync_guest_os_namespaces", value)
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[pulumi.Input[str]]:
        """
        Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        """
        return pulumi.get(self, "tenant_id")
    @tenant_id.setter
    def tenant_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenant_id", value)
class Integration(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 app_id: Optional[pulumi.Input[str]] = None,
                 custom_namespaces_per_services: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IntegrationCustomNamespacesPerServiceArgs']]]]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 environment: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 named_token: Optional[pulumi.Input[str]] = None,
                 poll_rate: Optional[pulumi.Input[int]] = None,
                 secret_key: Optional[pulumi.Input[str]] = None,
                 services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 sync_guest_os_namespaces: Optional[pulumi.Input[bool]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        SignalFx Azure integrations. For help with this integration see [Monitoring Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure).
        > **NOTE** When managing integrations you'll need to use an admin token to authenticate the SignalFx provider. Otherwise you'll receive a 4xx error.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_signalfx as signalfx
        azure_myteam = signalfx.azure.Integration("azureMyteam",
            app_id="YYY",
            custom_namespaces_per_services=[signalfx.azure.IntegrationCustomNamespacesPerServiceArgs(
                namespaces=[
                    "monitoringAgent",
                    "customNamespace",
                ],
                service="Microsoft.Compute/virtualMachines",
            )],
            enabled=True,
            environment="azure",
            poll_rate=300,
            secret_key="XXX",
            services=["microsoft.sql/servers/elasticpools"],
            subscriptions=["sub-guid-here"],
            tenant_id="ZZZ")
        ```
        ## Service Names
        > **NOTE** You can use the data source "azure.getServices" to specify all services.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] app_id: Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IntegrationCustomNamespacesPerServiceArgs']]]] custom_namespaces_per_services: Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces.
        :param pulumi.Input[bool] enabled: Whether the integration is enabled.
        :param pulumi.Input[str] environment: What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`.
        :param pulumi.Input[str] name: Name of the integration.
        :param pulumi.Input[str] named_token: A named token to use for ingest
        :param pulumi.Input[int] poll_rate: Azure poll rate (in seconds). One of `60` or `300`.
        :param pulumi.Input[str] secret_key: Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] services: List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valid values.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subscriptions: List of Azure subscriptions that SignalFx should monitor.
        :param pulumi.Input[bool] sync_guest_os_namespaces: If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false.
        :param pulumi.Input[str] tenant_id: Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: IntegrationArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        SignalFx Azure integrations. For help with this integration see [Monitoring Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure).
        > **NOTE** When managing integrations you'll need to use an admin token to authenticate the SignalFx provider. Otherwise you'll receive a 4xx error.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_signalfx as signalfx
        azure_myteam = signalfx.azure.Integration("azureMyteam",
            app_id="YYY",
            custom_namespaces_per_services=[signalfx.azure.IntegrationCustomNamespacesPerServiceArgs(
                namespaces=[
                    "monitoringAgent",
                    "customNamespace",
                ],
                service="Microsoft.Compute/virtualMachines",
            )],
            enabled=True,
            environment="azure",
            poll_rate=300,
            secret_key="XXX",
            services=["microsoft.sql/servers/elasticpools"],
            subscriptions=["sub-guid-here"],
            tenant_id="ZZZ")
        ```
        ## Service Names
        > **NOTE** You can use the data source "azure.getServices" to specify all services.
        :param str resource_name: The name of the resource.
        :param IntegrationArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        # @overload stub only: the shared implementation lives in _internal_init.
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IntegrationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       app_id: Optional[pulumi.Input[str]] = None,
                       custom_namespaces_per_services: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IntegrationCustomNamespacesPerServiceArgs']]]]] = None,
                       enabled: Optional[pulumi.Input[bool]] = None,
                       environment: Optional[pulumi.Input[str]] = None,
                       name: Optional[pulumi.Input[str]] = None,
                       named_token: Optional[pulumi.Input[str]] = None,
                       poll_rate: Optional[pulumi.Input[int]] = None,
                       secret_key: Optional[pulumi.Input[str]] = None,
                       services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                       sync_guest_os_namespaces: Optional[pulumi.Input[bool]] = None,
                       tenant_id: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        """
        Shared implementation behind both ``__init__`` overloads.

        Normalizes the resource options, fills an ``IntegrationArgs`` property
        bag from the supplied inputs (raising ``TypeError`` for any missing
        required property), and hands the bag to the base resource constructor.
        """
        # Normalize and validate resource options before use.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No opts.id: constructing (not looking up), so build the property bag.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = IntegrationArgs.__new__(IntegrationArgs)
            # Required properties are only enforced when no URN is supplied.
            if app_id is None and not opts.urn:
                raise TypeError("Missing required property 'app_id'")
            __props__.__dict__["app_id"] = app_id
            __props__.__dict__["custom_namespaces_per_services"] = custom_namespaces_per_services
            if enabled is None and not opts.urn:
                raise TypeError("Missing required property 'enabled'")
            __props__.__dict__["enabled"] = enabled
            __props__.__dict__["environment"] = environment
            __props__.__dict__["name"] = name
            __props__.__dict__["named_token"] = named_token
            __props__.__dict__["poll_rate"] = poll_rate
            if secret_key is None and not opts.urn:
                raise TypeError("Missing required property 'secret_key'")
            __props__.__dict__["secret_key"] = secret_key
            if services is None and not opts.urn:
                raise TypeError("Missing required property 'services'")
            __props__.__dict__["services"] = services
            if subscriptions is None and not opts.urn:
                raise TypeError("Missing required property 'subscriptions'")
            __props__.__dict__["subscriptions"] = subscriptions
            __props__.__dict__["sync_guest_os_namespaces"] = sync_guest_os_namespaces
            if tenant_id is None and not opts.urn:
                raise TypeError("Missing required property 'tenant_id'")
            __props__.__dict__["tenant_id"] = tenant_id
        super(Integration, __self__).__init__(
            'signalfx:azure/integration:Integration',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            app_id: Optional[pulumi.Input[str]] = None,
            custom_namespaces_per_services: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IntegrationCustomNamespacesPerServiceArgs']]]]] = None,
            enabled: Optional[pulumi.Input[bool]] = None,
            environment: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            named_token: Optional[pulumi.Input[str]] = None,
            poll_rate: Optional[pulumi.Input[int]] = None,
            secret_key: Optional[pulumi.Input[str]] = None,
            services: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            subscriptions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
            sync_guest_os_namespaces: Optional[pulumi.Input[bool]] = None,
            tenant_id: Optional[pulumi.Input[str]] = None) -> 'Integration':
        """
        Get an existing Integration resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] app_id: Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IntegrationCustomNamespacesPerServiceArgs']]]] custom_namespaces_per_services: Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces.
        :param pulumi.Input[bool] enabled: Whether the integration is enabled.
        :param pulumi.Input[str] environment: What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`.
        :param pulumi.Input[str] name: Name of the integration.
        :param pulumi.Input[str] named_token: A named token to use for ingest
        :param pulumi.Input[int] poll_rate: Azure poll rate (in seconds). One of `60` or `300`.
        :param pulumi.Input[str] secret_key: Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] services: List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valid values.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] subscriptions: List of Azure subscriptions that SignalFx should monitor.
        :param pulumi.Input[bool] sync_guest_os_namespaces: If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false.
        :param pulumi.Input[str] tenant_id: Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
        """
        # Fold the provider id into opts so the base constructor performs a lookup.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # Build a state bag directly; no required-property checks apply on lookup.
        __props__ = _IntegrationState.__new__(_IntegrationState)
        __props__.__dict__["app_id"] = app_id
        __props__.__dict__["custom_namespaces_per_services"] = custom_namespaces_per_services
        __props__.__dict__["enabled"] = enabled
        __props__.__dict__["environment"] = environment
        __props__.__dict__["name"] = name
        __props__.__dict__["named_token"] = named_token
        __props__.__dict__["poll_rate"] = poll_rate
        __props__.__dict__["secret_key"] = secret_key
        __props__.__dict__["services"] = services
        __props__.__dict__["subscriptions"] = subscriptions
        __props__.__dict__["sync_guest_os_namespaces"] = sync_guest_os_namespaces
        __props__.__dict__["tenant_id"] = tenant_id
        return Integration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="appId")
def app_id(self) -> pulumi.Output[str]:
"""
Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation.
"""
return pulumi.get(self, "app_id")
@property
@pulumi.getter(name="customNamespacesPerServices")
def custom_namespaces_per_services(self) -> pulumi.Output[Optional[Sequence['outputs.IntegrationCustomNamespacesPerService']]]:
"""
Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces.
"""
return pulumi.get(self, "custom_namespaces_per_services")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[bool]:
"""
Whether the integration is enabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def environment(self) -> pulumi.Output[Optional[str]]:
"""
What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`.
"""
return pulumi.get(self, "environment")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the integration.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="namedToken")
def named_token(self) -> pulumi.Output[Optional[str]]:
"""
A named token to use for ingest
"""
return pulumi.get(self, "named_token")
    @property
    @pulumi.getter(name="pollRate")
    def poll_rate(self) -> pulumi.Output[Optional[int]]:
        """
        Azure poll rate (in seconds). One of `60` or `300`.
        """
        return pulumi.get(self, "poll_rate")
@property
@pulumi.getter(name="secretKey")
def secret_key(self) -> pulumi.Output[str]:
"""
Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
"""
return pulumi.get(self, "secret_key")
    @property
    @pulumi.getter
    def services(self) -> pulumi.Output[Sequence[str]]:
        """
        List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valid values.
        """
        return pulumi.get(self, "services")
@property
@pulumi.getter
def subscriptions(self) -> pulumi.Output[Sequence[str]]:
"""
List of Azure subscriptions that SignalFx should monitor.
"""
return pulumi.get(self, "subscriptions")
@property
@pulumi.getter(name="syncGuestOsNamespaces")
def sync_guest_os_namespaces(self) -> pulumi.Output[Optional[bool]]:
"""
If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false.
"""
return pulumi.get(self, "sync_guest_os_namespaces")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[str]:
"""
Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation.
"""
return pulumi.get(self, "tenant_id")
| en | 0.648002 | # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The set of arguments for constructing a Integration resource. :param pulumi.Input[str] app_id: Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation. :param pulumi.Input[bool] enabled: Whether the integration is enabled. :param pulumi.Input[str] secret_key: Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation. :param pulumi.Input[Sequence[pulumi.Input[str]]] services: List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valida values. :param pulumi.Input[Sequence[pulumi.Input[str]]] subscriptions: List of Azure subscriptions that SignalFx should monitor. :param pulumi.Input[str] tenant_id: Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation. :param pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]] custom_namespaces_per_services: Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. 
If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces. :param pulumi.Input[str] environment: What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`. :param pulumi.Input[str] name: Name of the integration. :param pulumi.Input[str] named_token: A named token to use for ingest :param pulumi.Input[int] poll_rate: AWS poll rate (in seconds). One of `60` or `300`. :param pulumi.Input[bool] sync_guest_os_namespaces: If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false. Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation. Whether the integration is enabled. Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation. List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valida values. List of Azure subscriptions that SignalFx should monitor. Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation. 
Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces. What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`. Name of the integration. A named token to use for ingest AWS poll rate (in seconds). One of `60` or `300`. If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false. Input properties used for looking up and filtering Integration resources. :param pulumi.Input[str] app_id: Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation. :param pulumi.Input[Sequence[pulumi.Input['IntegrationCustomNamespacesPerServiceArgs']]] custom_namespaces_per_services: Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces. :param pulumi.Input[bool] enabled: Whether the integration is enabled. :param pulumi.Input[str] environment: What type of Azure integration this is. 
The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`. :param pulumi.Input[str] name: Name of the integration. :param pulumi.Input[str] named_token: A named token to use for ingest :param pulumi.Input[int] poll_rate: AWS poll rate (in seconds). One of `60` or `300`. :param pulumi.Input[str] secret_key: Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation. :param pulumi.Input[Sequence[pulumi.Input[str]]] services: List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valida values. :param pulumi.Input[Sequence[pulumi.Input[str]]] subscriptions: List of Azure subscriptions that SignalFx should monitor. :param pulumi.Input[bool] sync_guest_os_namespaces: If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false. :param pulumi.Input[str] tenant_id: Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation. Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation. 
Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces. Whether the integration is enabled. What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`. Name of the integration. A named token to use for ingest AWS poll rate (in seconds). One of `60` or `300`. Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation. List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valida values. List of Azure subscriptions that SignalFx should monitor. If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false. Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation. SignalFx Azure integrations. For help with this integration see [Monitoring Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure). 
> **NOTE** When managing integrations you'll need to use an admin token to authenticate the SignalFx provider. Otherwise you'll receive a 4xx error. ## Example Usage ```python import pulumi import pulumi_signalfx as signalfx azure_myteam = signalfx.azure.Integration("azureMyteam", app_id="YYY", custom_namespaces_per_services=[signalfx.azure.IntegrationCustomNamespacesPerServiceArgs( namespaces=[ "monitoringAgent", "customNamespace", ], service="Microsoft.Compute/virtualMachines", )], enabled=True, environment="azure", poll_rate=300, secret_key="XXX", services=["microsoft.sql/servers/elasticpools"], subscriptions=["sub-guid-here"], tenant_id="ZZZ") ``` ## Service Names > **NOTE** You can use the data source "azure.getServices" to specify all services. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] app_id: Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IntegrationCustomNamespacesPerServiceArgs']]]] custom_namespaces_per_services: Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces. :param pulumi.Input[bool] enabled: Whether the integration is enabled. :param pulumi.Input[str] environment: What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`. :param pulumi.Input[str] name: Name of the integration. 
:param pulumi.Input[str] named_token: A named token to use for ingest :param pulumi.Input[int] poll_rate: AWS poll rate (in seconds). One of `60` or `300`. :param pulumi.Input[str] secret_key: Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation. :param pulumi.Input[Sequence[pulumi.Input[str]]] services: List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valida values. :param pulumi.Input[Sequence[pulumi.Input[str]]] subscriptions: List of Azure subscriptions that SignalFx should monitor. :param pulumi.Input[bool] sync_guest_os_namespaces: If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false. :param pulumi.Input[str] tenant_id: Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation. SignalFx Azure integrations. For help with this integration see [Monitoring Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure). > **NOTE** When managing integrations you'll need to use an admin token to authenticate the SignalFx provider. Otherwise you'll receive a 4xx error. 
## Example Usage ```python import pulumi import pulumi_signalfx as signalfx azure_myteam = signalfx.azure.Integration("azureMyteam", app_id="YYY", custom_namespaces_per_services=[signalfx.azure.IntegrationCustomNamespacesPerServiceArgs( namespaces=[ "monitoringAgent", "customNamespace", ], service="Microsoft.Compute/virtualMachines", )], enabled=True, environment="azure", poll_rate=300, secret_key="XXX", services=["microsoft.sql/servers/elasticpools"], subscriptions=["sub-guid-here"], tenant_id="ZZZ") ``` ## Service Names > **NOTE** You can use the data source "azure.getServices" to specify all services. :param str resource_name: The name of the resource. :param IntegrationArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. Get an existing Integration resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] app_id: Azure application ID for the SignalFx app. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['IntegrationCustomNamespacesPerServiceArgs']]]] custom_namespaces_per_services: Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces. 
:param pulumi.Input[bool] enabled: Whether the integration is enabled. :param pulumi.Input[str] environment: What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`. :param pulumi.Input[str] name: Name of the integration. :param pulumi.Input[str] named_token: A named token to use for ingest :param pulumi.Input[int] poll_rate: AWS poll rate (in seconds). One of `60` or `300`. :param pulumi.Input[str] secret_key: Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation. :param pulumi.Input[Sequence[pulumi.Input[str]]] services: List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valida values. :param pulumi.Input[Sequence[pulumi.Input[str]]] subscriptions: List of Azure subscriptions that SignalFx should monitor. :param pulumi.Input[bool] sync_guest_os_namespaces: If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false. :param pulumi.Input[str] tenant_id: Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation. Azure application ID for the SignalFx app. 
To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/getting-started/send-data.html#connect-to-microsoft-azure) in the product documentation. Allows for more fine-grained control of syncing of custom namespaces, should the boolean convenience parameter `sync_guest_os_namespaces` be not enough. The customer may specify a map of services to custom namespaces. If they do so, for each service which is a key in this map, we will attempt to sync metrics from namespaces in the value list in addition to the default namespaces. Whether the integration is enabled. What type of Azure integration this is. The allowed values are `\"azure_us_government\"` and `\"azure\"`. Defaults to `\"azure\"`. Name of the integration. A named token to use for ingest AWS poll rate (in seconds). One of `60` or `300`. Azure secret key that associates the SignalFx app in Azure with the Azure tenant ID. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation. List of Microsoft Azure service names for the Azure services you want SignalFx to monitor. See the documentation for [Creating Integrations](https://developers.signalfx.com/integrations_reference.html#operation/Create%20Integration) for valida values. List of Azure subscriptions that SignalFx should monitor. If enabled, SignalFx will try to sync additional namespaces for VMs (including VMs in scale sets): telegraf/mem, telegraf/cpu, azure.vm.windows.guest (these are namespaces recommended by Azure when enabling their Diagnostic Extension). If there are no metrics there, no new datapoints will be ingested. Defaults to false. Azure ID of the Azure tenant. To learn how to get this ID, see the topic [Connect to Microsoft Azure](https://docs.signalfx.com/en/latest/integrations/azure-info.html#connect-to-azure) in the product documentation. | 1.534481 | 2 |
locale/pot/api/plotting/_autosummary/pyvista-themes-_SilhouetteConfig-line_width-1.py | tkoyama010/pyvista-doc-translations | 4 | 6612313 | <filename>locale/pot/api/plotting/_autosummary/pyvista-themes-_SilhouetteConfig-line_width-1.py
import pyvista
# Configure the global plotting theme: draw silhouette edges with width 2.0.
pyvista.global_theme.silhouette.line_width = 2.0
| <filename>locale/pot/api/plotting/_autosummary/pyvista-themes-_SilhouetteConfig-line_width-1.py
import pyvista
# Configure the global plotting theme: draw silhouette edges with width 2.0.
pyvista.global_theme.silhouette.line_width = 2.0
| none | 1 | 1.004103 | 1 | |
models/slug.py | jonchui/MyLife | 27 | 6612314 | import datetime
from google.appengine.ext import ndb
class Slug(ndb.Model):
    """Datastore entity associating a URL slug string with a date."""
    slug = ndb.StringProperty()      # the slug text itself
    date = ndb.DateProperty()        # calendar date this slug maps to -- TODO confirm against callers
    created = ndb.DateTimeProperty(auto_now_add=True)  # set automatically on first put()
| import datetime
from google.appengine.ext import ndb
class Slug(ndb.Model):
    """Datastore entity associating a URL slug string with a date."""
    slug = ndb.StringProperty()      # the slug text itself
    date = ndb.DateProperty()        # calendar date this slug maps to -- TODO confirm against callers
    created = ndb.DateTimeProperty(auto_now_add=True)  # set automatically on first put()
| none | 1 | 2.600128 | 3 | |
dg/model/mixins.py | alefnula/dg | 0 | 6612315 | __author__ = '<NAME> <<EMAIL>>'
__date__ = ' 31 December 2017'
__copyright__ = 'Copyright (c) 2017 <NAME>'
class ClassifierMixin(object):
    """Base class for all classifiers"""
    # Tag identifying this estimator as a classifier.
    _estimator_type = 'classifier'
class RegressorMixin(object):
    """Base class for all regressors"""
    # Tag identifying this estimator as a regressor.
    _estimator_type = 'regressor'
| __author__ = '<NAME> <<EMAIL>>'
__date__ = ' 31 December 2017'
__copyright__ = 'Copyright (c) 2017 <NAME>'
class ClassifierMixin(object):
    """Base class for all classifiers"""
    # Tag identifying this estimator as a classifier.
    _estimator_type = 'classifier'
class RegressorMixin(object):
    """Base class for all regressors"""
    # Tag identifying this estimator as a regressor.
    _estimator_type = 'regressor'
| en | 0.658421 | Base class for all classifiers Base class for all regressors | 1.327289 | 1 |
pytorch_test.py | calebyoung26/openvino_test | 0 | 6612316 | <filename>pytorch_test.py<gh_stars>0
import numpy as np
from torchvision.models.resnet import resnet18
import torchvision.transforms as transforms
from PIL import Image
import torch
import time
class Pytorch_test():
def __init__(self):
pass
def run(self, input_img,number_iter=1):
imagenet_mean = (0.485, 0.456, 0.406)
imagenet_std = (0.229, 0.224, 0.225)
transform_test_IMAGENET = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
transform_test = transform_test_IMAGENET
net18 = resnet18(pretrained=True)
net18.eval()
im = Image.open(input_img)
x = transform_test(im)
x = x.unsqueeze(dim=0)
t0 = time.time()
for i in range(number_iter):
res18 = net18(x)[0]
infer_time = time.time()-t0
#print(net18(x)[0].shape)
values, indices = res18.max(0)
values, indices = torch.topk(res18, 10)
labels ="test_model.labels"
if labels:
with open(labels, 'r') as f:
labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
else:
labels_map = None
print('<<<<<<<<<<<RESULTS FOR PYTORCH>>>>>>>>>>>>')
for i, probs in enumerate(res18):
if (i<10):
print( labels_map[indices[i]], values[i].item(), indices[i])
print("{:.7f} label {}".format(values[i].item(), labels_map[indices[i]]))
print('\n')
print("PyTorch ran {} iterations in {} seconds".format(number_iter,infer_time))
print('\n') | <filename>pytorch_test.py<gh_stars>0
import numpy as np
from torchvision.models.resnet import resnet18
import torchvision.transforms as transforms
from PIL import Image
import torch
import time
class Pytorch_test():
def __init__(self):
pass
def run(self, input_img,number_iter=1):
imagenet_mean = (0.485, 0.456, 0.406)
imagenet_std = (0.229, 0.224, 0.225)
transform_test_IMAGENET = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
])
transform_test = transform_test_IMAGENET
net18 = resnet18(pretrained=True)
net18.eval()
im = Image.open(input_img)
x = transform_test(im)
x = x.unsqueeze(dim=0)
t0 = time.time()
for i in range(number_iter):
res18 = net18(x)[0]
infer_time = time.time()-t0
#print(net18(x)[0].shape)
values, indices = res18.max(0)
values, indices = torch.topk(res18, 10)
labels ="test_model.labels"
if labels:
with open(labels, 'r') as f:
labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]
else:
labels_map = None
print('<<<<<<<<<<<RESULTS FOR PYTORCH>>>>>>>>>>>>')
for i, probs in enumerate(res18):
if (i<10):
print( labels_map[indices[i]], values[i].item(), indices[i])
print("{:.7f} label {}".format(values[i].item(), labels_map[indices[i]]))
print('\n')
print("PyTorch ran {} iterations in {} seconds".format(number_iter,infer_time))
print('\n') | en | 0.521245 | #print(net18(x)[0].shape) | 2.47042 | 2 |
python/test/test_index.py | InterestingLab/parquet-index | 1 | 6612317 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2016 Lightcopy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import tempfile
import unittest
import uuid
from pyspark.sql.session import SparkSession
from pyspark.sql.functions import lit
from lightcopy.index import Const, QueryContext
class ConstSuite(unittest.TestCase):
    """Sanity checks for the constants exposed by lightcopy.index.Const."""

    def test_parquet_source(self):
        # Datasource name must match Spark's built-in parquet source name.
        self.assertEqual(Const.PARQUET_SOURCE, 'parquet')

    def test_metastore_conf(self):
        # Spark conf key under which the index metastore location is stored.
        self.assertEqual(Const.METASTORE_LOCATION, 'spark.sql.index.metastore')
class IndexSuite(unittest.TestCase):
    """End-to-end tests for creating, querying and deleting Parquet indexes
    through the Python QueryContext API, for both filesystem tables and
    catalog tables."""

    def tempdir(self):
        """
        Generate random temporary directory path.
        """
        path = tempfile.gettempdir()
        return os.path.join(path, 'parquet-index-test-' + str(uuid.uuid4()))

    def setUp(self):
        # Fresh metastore location per test so index state never leaks between tests.
        self.dirpath = self.tempdir()
        self.spark = SparkSession.builder \
            .master('local[*]') \
            .appName('Pyspark test') \
            .config(Const.METASTORE_LOCATION, os.path.join(self.dirpath, 'metastore')) \
            .config("spark.sql.sources.default", "parquet") \
            .getOrCreate()

    def tearDown(self):
        if self.spark:
            self.spark.stop()
            # This is added due to bug in Spark 2.0.0 when recreating SparkSession after stop() does
            # not create SparkContext in JVM, see SPARK-17261
            SparkSession._instantiatedContext = None
            self.spark = None
        shutil.rmtree(self.dirpath, ignore_errors=True)

    def test_index_wrong_init(self):
        # QueryContext must reject a missing SparkSession.
        with self.assertRaises(AttributeError):
            QueryContext(None)

    def test_manager_set_source(self):
        context = QueryContext(self.spark)
        manager = context.index.format('test-format')
        self.assertEqual(manager._source, 'test-format')

    def test_manager_set_many_sources(self):
        # The last format() call wins.
        context = QueryContext(self.spark)
        manager = context.index.format('a').format('b').format('c')
        self.assertEqual(manager._source, 'c')

    def test_manager_set_option(self):
        # Option values are stringified regardless of input type.
        context = QueryContext(self.spark)
        manager = context.index.option('key1', '1').option('key2', 2).option('key3', True)
        self.assertEqual(manager._options, {'key1': '1', 'key2': '2', 'key3': 'True'})

    def test_manager_set_options_wrong(self):
        context = QueryContext(self.spark)
        with self.assertRaises(AttributeError):
            context.index.options(None)

    def test_manager_set_options(self):
        # options() merges into and overrides previously set options.
        context = QueryContext(self.spark)
        manager = context.index.option('a', '1').options({'a': '2', 'b': 3, 'c': True})
        self.assertEqual(manager._options, {'a': '2', 'b': '3', 'c': 'True'})

    def test_create_command_mode(self):
        # The last mode() call wins.
        context = QueryContext(self.spark)
        cmd = context.index.create.mode('overwrite').mode('ignore')
        self.assertEqual(cmd._mode, 'ignore')

    def test_create_command_wrong_mode(self):
        context = QueryContext(self.spark)
        cmd = context.index.create.mode(None)
        error_msg = None
        try:
            cmd.parquet(None)
        except Exception as err:
            error_msg = str(err)
        self.assertTrue(error_msg is not None)
        self.assertTrue('Unsupported mode None' in error_msg)

    def test_create_command_index_by_col(self):
        context = QueryContext(self.spark)
        cmd = context.index.create.indexBy('a')
        self.assertEqual(cmd._columns, ['a'])

    def test_create_command_index_by_cols(self):
        context = QueryContext(self.spark)
        cmd = context.index.create.indexBy('a', 'b')
        self.assertEqual(cmd._columns, ['a', 'b'])

    def test_create_command_index_by_none(self):
        context = QueryContext(self.spark)
        cmd = context.index.create.indexBy()
        self.assertEqual(cmd._columns, [])

    def test_create_command_index_by_all(self):
        # indexByAll() is encoded internally as columns == None.
        context = QueryContext(self.spark)
        cmd = context.index.create.indexByAll()
        self.assertEqual(cmd._columns, None)

    def test_create_index(self):
        context = QueryContext(self.spark)
        table_path = os.path.join(self.dirpath, 'table.parquet')
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.parquet(table_path)
        context.index.create.indexByAll().parquet(table_path)
        self.assertTrue(context.index.exists.parquet(table_path))

    def test_create_index_cols(self):
        context = QueryContext(self.spark)
        table_path = os.path.join(self.dirpath, 'table.parquet')
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.parquet(table_path)
        context.index.create.indexBy('id', 'str').parquet(table_path)
        self.assertTrue(context.index.exists.parquet(table_path))

    def test_create_index_mode(self):
        context = QueryContext(self.spark)
        table_path = os.path.join(self.dirpath, 'table.parquet')
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.parquet(table_path)
        context.index.create.mode('error').indexByAll().parquet(table_path)
        # Re-creating with 'overwrite' must succeed on an existing index.
        context.index.create.mode('overwrite').indexByAll().parquet(table_path)
        self.assertTrue(context.index.exists.parquet(table_path))

    def test_create_delete_index(self):
        context = QueryContext(self.spark)
        table_path = os.path.join(self.dirpath, 'table.parquet')
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.parquet(table_path)
        context.index.create.indexByAll().parquet(table_path)
        self.assertTrue(context.index.exists.parquet(table_path))
        context.index.delete.parquet(table_path)
        self.assertFalse(context.index.exists.parquet(table_path))

    def test_create_query_index(self):
        # An indexed query must return the same rows as a plain read.
        context = QueryContext(self.spark)
        table_path = os.path.join(self.dirpath, 'table.parquet')
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.parquet(table_path)
        context.index.create.indexByAll().parquet(table_path)
        res1 = context.index.parquet(table_path).filter('id = 3').collect()
        res2 = self.spark.read.parquet(table_path).filter('id = 3').collect()
        self.assertEqual(res1, res2)

    def test_create_query_index_empty_table(self):
        context = QueryContext(self.spark)
        table_path = os.path.join(self.dirpath, 'table.parquet')
        self.spark.range(0, 10).filter('id < 0') \
            .withColumn('str', lit('abc')).write.parquet(table_path)
        context.index.create.indexByAll().parquet(table_path)
        res = context.index.parquet(table_path).filter('id = 3').collect()
        self.assertEqual(res, [])

    def test_create_delete_index_catalog_table(self):
        context = QueryContext(self.spark)
        tableName = "test_parquet_table"
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.saveAsTable(tableName)
        try:
            context.index.create.indexByAll().table(tableName)
            self.assertTrue(context.index.exists.table(tableName))
            context.index.delete.table(tableName)
            self.assertFalse(context.index.exists.table(tableName))
        finally:
            self.spark.sql("drop table " + tableName)

    def test_create_overwrite_index_catalog_table(self):
        context = QueryContext(self.spark)
        tableName = "test_parquet_table"
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.saveAsTable(tableName)
        try:
            context.index.create.indexByAll().table(tableName)
            context.index.create.mode('overwrite').indexBy('id').table(tableName)
            self.assertTrue(context.index.exists.table(tableName))
        finally:
            self.spark.sql("drop table " + tableName)

    def test_create_query_index_catalog_table(self):
        context = QueryContext(self.spark)
        tableName = "test_parquet_table"
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.saveAsTable(tableName)
        try:
            context.index.create.indexByAll().table(tableName)
            res1 = context.index.table(tableName).filter('id = 3').collect()
            res2 = self.spark.table(tableName).filter('id = 3').collect()
            # assertEquals is a deprecated alias removed in Python 3.12; use assertEqual.
            self.assertEqual(res1, res2)
        finally:
            self.spark.sql("drop table " + tableName)
# Load test suites
def suites():
    """Return the test-suite classes exported by this module."""
    return [ConstSuite, IndexSuite]
| #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2016 Lightcopy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import tempfile
import unittest
import uuid
from pyspark.sql.session import SparkSession
from pyspark.sql.functions import lit
from lightcopy.index import Const, QueryContext
class ConstSuite(unittest.TestCase):
    """Sanity checks for the constants exposed by lightcopy.index.Const."""

    def test_parquet_source(self):
        # Datasource name must match Spark's built-in parquet source name.
        self.assertEqual(Const.PARQUET_SOURCE, 'parquet')

    def test_metastore_conf(self):
        # Spark conf key under which the index metastore location is stored.
        self.assertEqual(Const.METASTORE_LOCATION, 'spark.sql.index.metastore')
class IndexSuite(unittest.TestCase):
    """End-to-end tests for creating, querying and deleting Parquet indexes
    through the Python QueryContext API, for both filesystem tables and
    catalog tables."""

    def tempdir(self):
        """
        Generate random temporary directory path.
        """
        path = tempfile.gettempdir()
        return os.path.join(path, 'parquet-index-test-' + str(uuid.uuid4()))

    def setUp(self):
        # Fresh metastore location per test so index state never leaks between tests.
        self.dirpath = self.tempdir()
        self.spark = SparkSession.builder \
            .master('local[*]') \
            .appName('Pyspark test') \
            .config(Const.METASTORE_LOCATION, os.path.join(self.dirpath, 'metastore')) \
            .config("spark.sql.sources.default", "parquet") \
            .getOrCreate()

    def tearDown(self):
        if self.spark:
            self.spark.stop()
            # This is added due to bug in Spark 2.0.0 when recreating SparkSession after stop() does
            # not create SparkContext in JVM, see SPARK-17261
            SparkSession._instantiatedContext = None
            self.spark = None
        shutil.rmtree(self.dirpath, ignore_errors=True)

    def test_index_wrong_init(self):
        # QueryContext must reject a missing SparkSession.
        with self.assertRaises(AttributeError):
            QueryContext(None)

    def test_manager_set_source(self):
        context = QueryContext(self.spark)
        manager = context.index.format('test-format')
        self.assertEqual(manager._source, 'test-format')

    def test_manager_set_many_sources(self):
        # The last format() call wins.
        context = QueryContext(self.spark)
        manager = context.index.format('a').format('b').format('c')
        self.assertEqual(manager._source, 'c')

    def test_manager_set_option(self):
        # Option values are stringified regardless of input type.
        context = QueryContext(self.spark)
        manager = context.index.option('key1', '1').option('key2', 2).option('key3', True)
        self.assertEqual(manager._options, {'key1': '1', 'key2': '2', 'key3': 'True'})

    def test_manager_set_options_wrong(self):
        context = QueryContext(self.spark)
        with self.assertRaises(AttributeError):
            context.index.options(None)

    def test_manager_set_options(self):
        # options() merges into and overrides previously set options.
        context = QueryContext(self.spark)
        manager = context.index.option('a', '1').options({'a': '2', 'b': 3, 'c': True})
        self.assertEqual(manager._options, {'a': '2', 'b': '3', 'c': 'True'})

    def test_create_command_mode(self):
        # The last mode() call wins.
        context = QueryContext(self.spark)
        cmd = context.index.create.mode('overwrite').mode('ignore')
        self.assertEqual(cmd._mode, 'ignore')

    def test_create_command_wrong_mode(self):
        context = QueryContext(self.spark)
        cmd = context.index.create.mode(None)
        error_msg = None
        try:
            cmd.parquet(None)
        except Exception as err:
            error_msg = str(err)
        self.assertTrue(error_msg is not None)
        self.assertTrue('Unsupported mode None' in error_msg)

    def test_create_command_index_by_col(self):
        context = QueryContext(self.spark)
        cmd = context.index.create.indexBy('a')
        self.assertEqual(cmd._columns, ['a'])

    def test_create_command_index_by_cols(self):
        context = QueryContext(self.spark)
        cmd = context.index.create.indexBy('a', 'b')
        self.assertEqual(cmd._columns, ['a', 'b'])

    def test_create_command_index_by_none(self):
        context = QueryContext(self.spark)
        cmd = context.index.create.indexBy()
        self.assertEqual(cmd._columns, [])

    def test_create_command_index_by_all(self):
        # indexByAll() is encoded internally as columns == None.
        context = QueryContext(self.spark)
        cmd = context.index.create.indexByAll()
        self.assertEqual(cmd._columns, None)

    def test_create_index(self):
        context = QueryContext(self.spark)
        table_path = os.path.join(self.dirpath, 'table.parquet')
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.parquet(table_path)
        context.index.create.indexByAll().parquet(table_path)
        self.assertTrue(context.index.exists.parquet(table_path))

    def test_create_index_cols(self):
        context = QueryContext(self.spark)
        table_path = os.path.join(self.dirpath, 'table.parquet')
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.parquet(table_path)
        context.index.create.indexBy('id', 'str').parquet(table_path)
        self.assertTrue(context.index.exists.parquet(table_path))

    def test_create_index_mode(self):
        context = QueryContext(self.spark)
        table_path = os.path.join(self.dirpath, 'table.parquet')
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.parquet(table_path)
        context.index.create.mode('error').indexByAll().parquet(table_path)
        # Re-creating with 'overwrite' must succeed on an existing index.
        context.index.create.mode('overwrite').indexByAll().parquet(table_path)
        self.assertTrue(context.index.exists.parquet(table_path))

    def test_create_delete_index(self):
        context = QueryContext(self.spark)
        table_path = os.path.join(self.dirpath, 'table.parquet')
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.parquet(table_path)
        context.index.create.indexByAll().parquet(table_path)
        self.assertTrue(context.index.exists.parquet(table_path))
        context.index.delete.parquet(table_path)
        self.assertFalse(context.index.exists.parquet(table_path))

    def test_create_query_index(self):
        # An indexed query must return the same rows as a plain read.
        context = QueryContext(self.spark)
        table_path = os.path.join(self.dirpath, 'table.parquet')
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.parquet(table_path)
        context.index.create.indexByAll().parquet(table_path)
        res1 = context.index.parquet(table_path).filter('id = 3').collect()
        res2 = self.spark.read.parquet(table_path).filter('id = 3').collect()
        self.assertEqual(res1, res2)

    def test_create_query_index_empty_table(self):
        context = QueryContext(self.spark)
        table_path = os.path.join(self.dirpath, 'table.parquet')
        self.spark.range(0, 10).filter('id < 0') \
            .withColumn('str', lit('abc')).write.parquet(table_path)
        context.index.create.indexByAll().parquet(table_path)
        res = context.index.parquet(table_path).filter('id = 3').collect()
        self.assertEqual(res, [])

    def test_create_delete_index_catalog_table(self):
        context = QueryContext(self.spark)
        tableName = "test_parquet_table"
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.saveAsTable(tableName)
        try:
            context.index.create.indexByAll().table(tableName)
            self.assertTrue(context.index.exists.table(tableName))
            context.index.delete.table(tableName)
            self.assertFalse(context.index.exists.table(tableName))
        finally:
            self.spark.sql("drop table " + tableName)

    def test_create_overwrite_index_catalog_table(self):
        context = QueryContext(self.spark)
        tableName = "test_parquet_table"
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.saveAsTable(tableName)
        try:
            context.index.create.indexByAll().table(tableName)
            context.index.create.mode('overwrite').indexBy('id').table(tableName)
            self.assertTrue(context.index.exists.table(tableName))
        finally:
            self.spark.sql("drop table " + tableName)

    def test_create_query_index_catalog_table(self):
        context = QueryContext(self.spark)
        tableName = "test_parquet_table"
        self.spark.range(0, 10).withColumn('str', lit('abc')).write.saveAsTable(tableName)
        try:
            context.index.create.indexByAll().table(tableName)
            res1 = context.index.table(tableName).filter('id = 3').collect()
            res2 = self.spark.table(tableName).filter('id = 3').collect()
            # assertEquals is a deprecated alias removed in Python 3.12; use assertEqual.
            self.assertEqual(res1, res2)
        finally:
            self.spark.sql("drop table " + tableName)
# Load test suites
def suites():
    """Return the test-suite classes exported by this module."""
    return [ConstSuite, IndexSuite]
| en | 0.807484 | #!/usr/bin/env python # -*- coding: UTF-8 -*- # # Copyright 2016 Lightcopy # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Generate random temporary directory path. # This is added due to bug in Spark 2.0.0 when recreating SparkSession after stop() does # not create SparkContext in JVM, see SPARK-17261 # Load test suites | 2.179295 | 2 |
cmput_404_project/service/tests/tests_inbox.py | 3662/cmput404-project | 3 | 6612318 | import uuid
import json
from django.test import TestCase, Client
from django.core.exceptions import ObjectDoesNotExist
from social_distribution.models import Author, Post, Inbox, InboxItem, FollowRequest, Like, Comment
from service.models import ServerNode
from .helper import create_dummy_authors, create_dummy_post, create_dummy_posts, create_dummy_comments
class InboxViewTestCase(TestCase):
    """End-to-end tests for POSTing objects to an author's inbox endpoint.

    Each test sends one supported object type (post, follow, like, comment)
    to /service/authors/<id>/inbox, verifies the corresponding model rows and
    InboxItem are created, then clears the inbox via DELETE.
    """

    def setUp(self):
        # Register the test server as a local node so inbox URLs resolve.
        ServerNode.objects.create(host='testserver', is_local=True)
        create_dummy_authors(2)

    def _post_to_inbox(self, c, receiver, payload):
        """POST *payload* (a dict) to *receiver*'s inbox and return the response."""
        return c.post(f'/service/authors/{receiver.id}/inbox',
                      json.dumps(payload),
                      content_type='application/json')

    def _assert_inbox_cleared(self, c, receiver, receiver_inbox):
        """DELETE the receiver's inbox and assert it is empty afterwards."""
        response = c.delete(f'/service/authors/{receiver.id}/inbox')
        self.assertEqual(response.status_code, 204)
        self.assertTrue(not InboxItem.objects.filter(inbox=receiver_inbox).exists())

    def test_send_posts(self):
        c = Client()
        sender = Author.objects.get(username='test0')
        receiver = Author.objects.get(username='test1')

        # Dummy posts authored by the sender.
        num_posts = 5
        create_dummy_posts(num_posts, sender, 'PUBLIC', 'text/plain')
        posts = Post.objects.filter(author=sender).order_by('id')
        self.assertEqual(len(posts), num_posts)

        # Sender sends each dummy post to the receiver's inbox.
        for post in posts:
            response = self._post_to_inbox(c, receiver, post.get_detail_dict())
            self.assertEqual(response.status_code, 201)

        # One InboxItem per post must have been created.
        receiver_inbox = Inbox.objects.get(author=receiver)
        self.assertEqual(len(InboxItem.objects.filter(inbox=receiver_inbox)), num_posts)

        # The API representation of each item matches the originating post.
        items = InboxItem.objects.filter(inbox=receiver_inbox).order_by('object_id')
        for i in range(len(items)):
            self.assertDictEqual(items[i].get_detail_dict(), posts[i].get_detail_dict())

        self._assert_inbox_cleared(c, receiver, receiver_inbox)

    def test_send_follow_request(self):
        c = Client()
        sender = Author.objects.get(username='test0')
        receiver = Author.objects.get(username='test1')

        # Valid Follow activity object.
        data = {
            'type': 'Follow',
            'summary': 'Test0 wants to follow Test1',
            'actor': sender.get_detail_dict(),
            'object': receiver.get_detail_dict(),
        }
        response = self._post_to_inbox(c, receiver, data)
        self.assertEqual(response.status_code, 201)
        self.assertTrue(FollowRequest.objects.filter(from_author=sender, to_author=receiver).exists())
        fr = FollowRequest.objects.get(from_author=sender, to_author=receiver)

        # An InboxItem referencing the follow request must exist.
        receiver_inbox = Inbox.objects.get(author=receiver)
        self.assertTrue(InboxItem.objects.filter(inbox=receiver_inbox, object_url=None, object_id=fr.id).exists())

        # Sending the same Follow object again must be rejected.
        response = self._post_to_inbox(c, receiver, data)
        self.assertEqual(response.status_code, 400)

        self._assert_inbox_cleared(c, receiver, receiver_inbox)

    def test_send_like(self):
        c = Client()
        sender = Author.objects.get(username='test0')
        receiver = Author.objects.get(username='test1')
        create_dummy_post(receiver)
        post = Post.objects.get(author=receiver)

        # Valid Like activity object targeting the receiver's post.
        data = {
            '@context': 'https://www.w3.org/ns/activitystreams',
            'summary': 'Test0 Likes your post',
            'type': 'Like',
            'author': sender.get_detail_dict(),
            'object': post.get_id_url()
        }
        response = self._post_to_inbox(c, receiver, data)
        self.assertEqual(response.status_code, 201)
        self.assertTrue(Like.objects.filter(author=sender,
                                            author_url=sender.get_id_url(),
                                            object_type='POST',
                                            object_url=post.get_id_url()).exists())
        like = Like.objects.get(author=sender,
                                author_url=sender.get_id_url(),
                                object_type='POST',
                                object_url=post.get_id_url())
        receiver_inbox = Inbox.objects.get(author=receiver)
        self.assertTrue(InboxItem.objects.filter(inbox=receiver_inbox, object_url=None, object_id=like.id).exists())

        self._assert_inbox_cleared(c, receiver, receiver_inbox)

    def test_send_comment(self):
        c = Client()
        sender = Author.objects.get(username='test0')
        receiver = Author.objects.get(username='test1')
        create_dummy_post(receiver)
        post = Post.objects.get(author=receiver)
        create_dummy_comments(1, sender, post)
        comment = Comment.objects.get(author=sender, post=post)

        response = self._post_to_inbox(c, receiver, comment.get_detail_dict())
        self.assertEqual(response.status_code, 201)
        receiver_inbox = Inbox.objects.get(author=receiver)
        self.assertTrue(InboxItem.objects.filter(inbox=receiver_inbox, object_url=comment.get_id_url(), object_id=comment.id).exists())

        self._assert_inbox_cleared(c, receiver, receiver_inbox)
| import uuid
import json
from django.test import TestCase, Client
from django.core.exceptions import ObjectDoesNotExist
from social_distribution.models import Author, Post, Inbox, InboxItem, FollowRequest, Like, Comment
from service.models import ServerNode
from .helper import create_dummy_authors, create_dummy_post, create_dummy_posts, create_dummy_comments
class InboxViewTestCase(TestCase):
    """End-to-end tests for POSTing objects to an author's inbox endpoint.

    Each test sends one supported object type (post, follow, like, comment)
    to /service/authors/<id>/inbox, verifies the corresponding model rows and
    InboxItem are created, then clears the inbox via DELETE.
    """

    def setUp(self):
        # Register the test server as a local node so inbox URLs resolve.
        ServerNode.objects.create(host='testserver', is_local=True)
        create_dummy_authors(2)

    def _post_to_inbox(self, c, receiver, payload):
        """POST *payload* (a dict) to *receiver*'s inbox and return the response."""
        return c.post(f'/service/authors/{receiver.id}/inbox',
                      json.dumps(payload),
                      content_type='application/json')

    def _assert_inbox_cleared(self, c, receiver, receiver_inbox):
        """DELETE the receiver's inbox and assert it is empty afterwards."""
        response = c.delete(f'/service/authors/{receiver.id}/inbox')
        self.assertEqual(response.status_code, 204)
        self.assertTrue(not InboxItem.objects.filter(inbox=receiver_inbox).exists())

    def test_send_posts(self):
        c = Client()
        sender = Author.objects.get(username='test0')
        receiver = Author.objects.get(username='test1')

        # Dummy posts authored by the sender.
        num_posts = 5
        create_dummy_posts(num_posts, sender, 'PUBLIC', 'text/plain')
        posts = Post.objects.filter(author=sender).order_by('id')
        self.assertEqual(len(posts), num_posts)

        # Sender sends each dummy post to the receiver's inbox.
        for post in posts:
            response = self._post_to_inbox(c, receiver, post.get_detail_dict())
            self.assertEqual(response.status_code, 201)

        # One InboxItem per post must have been created.
        receiver_inbox = Inbox.objects.get(author=receiver)
        self.assertEqual(len(InboxItem.objects.filter(inbox=receiver_inbox)), num_posts)

        # The API representation of each item matches the originating post.
        items = InboxItem.objects.filter(inbox=receiver_inbox).order_by('object_id')
        for i in range(len(items)):
            self.assertDictEqual(items[i].get_detail_dict(), posts[i].get_detail_dict())

        self._assert_inbox_cleared(c, receiver, receiver_inbox)

    def test_send_follow_request(self):
        c = Client()
        sender = Author.objects.get(username='test0')
        receiver = Author.objects.get(username='test1')

        # Valid Follow activity object.
        data = {
            'type': 'Follow',
            'summary': 'Test0 wants to follow Test1',
            'actor': sender.get_detail_dict(),
            'object': receiver.get_detail_dict(),
        }
        response = self._post_to_inbox(c, receiver, data)
        self.assertEqual(response.status_code, 201)
        self.assertTrue(FollowRequest.objects.filter(from_author=sender, to_author=receiver).exists())
        fr = FollowRequest.objects.get(from_author=sender, to_author=receiver)

        # An InboxItem referencing the follow request must exist.
        receiver_inbox = Inbox.objects.get(author=receiver)
        self.assertTrue(InboxItem.objects.filter(inbox=receiver_inbox, object_url=None, object_id=fr.id).exists())

        # Sending the same Follow object again must be rejected.
        response = self._post_to_inbox(c, receiver, data)
        self.assertEqual(response.status_code, 400)

        self._assert_inbox_cleared(c, receiver, receiver_inbox)

    def test_send_like(self):
        c = Client()
        sender = Author.objects.get(username='test0')
        receiver = Author.objects.get(username='test1')
        create_dummy_post(receiver)
        post = Post.objects.get(author=receiver)

        # Valid Like activity object targeting the receiver's post.
        data = {
            '@context': 'https://www.w3.org/ns/activitystreams',
            'summary': 'Test0 Likes your post',
            'type': 'Like',
            'author': sender.get_detail_dict(),
            'object': post.get_id_url()
        }
        response = self._post_to_inbox(c, receiver, data)
        self.assertEqual(response.status_code, 201)
        self.assertTrue(Like.objects.filter(author=sender,
                                            author_url=sender.get_id_url(),
                                            object_type='POST',
                                            object_url=post.get_id_url()).exists())
        like = Like.objects.get(author=sender,
                                author_url=sender.get_id_url(),
                                object_type='POST',
                                object_url=post.get_id_url())
        receiver_inbox = Inbox.objects.get(author=receiver)
        self.assertTrue(InboxItem.objects.filter(inbox=receiver_inbox, object_url=None, object_id=like.id).exists())

        self._assert_inbox_cleared(c, receiver, receiver_inbox)

    def test_send_comment(self):
        c = Client()
        sender = Author.objects.get(username='test0')
        receiver = Author.objects.get(username='test1')
        create_dummy_post(receiver)
        post = Post.objects.get(author=receiver)
        create_dummy_comments(1, sender, post)
        comment = Comment.objects.get(author=sender, post=post)

        response = self._post_to_inbox(c, receiver, comment.get_detail_dict())
        self.assertEqual(response.status_code, 201)
        receiver_inbox = Inbox.objects.get(author=receiver)
        self.assertTrue(InboxItem.objects.filter(inbox=receiver_inbox, object_url=comment.get_id_url(), object_id=comment.id).exists())

        self._assert_inbox_cleared(c, receiver, receiver_inbox)
| en | 0.853289 | # dummy posts # sender sends dummy posts to receiver's inbox # assert InboxItems are created # assert their api objects # clear inbox # valid follow object # assert InboxItem is created # send Follow object again (should fail) # clear inbox # valid like object to post # clear inbox # clear inbox | 2.182317 | 2 |
src/ptrading/signal/_basic.py | abhishekpratapa/ptrading | 0 | 6612319 | <reponame>abhishekpratapa/ptrading
"""
# -*- coding: utf-8 -*-
# Copyright © 2020 <NAME>. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
from ._signal import SignalBase as _SignalBase
class MovingAverage(_SignalBase):
    """Signal emitting the simple moving average of buffered bars' OHLC4 price."""

    def __init__(self, ticker: str, buffer_size: int = 1000):
        super().__init__(ticker, buffer_size)

    def _get_average(self, bars):
        """Return the mean OHLC4 ((open+close+high+low)/4) price of *bars*.

        An empty sequence yields 0.0.
        """
        if len(bars) == 0:
            return 0.0
        total = sum((1.0 * (b.open + b.close + b.high + b.low)) / 4.0 for b in bars)
        return (1.0 * total) / len(bars)

    def process_bar_wrapper(self, bar) -> float:
        """Buffer *bar*; once the buffer is ready, return the buffered average."""
        self.buffer.add(bar)
        if not self.buffer.ready():
            return 0.0
        return self._get_average(self.buffer.get())
| """
# -*- coding: utf-8 -*-
# Copyright © 2020 <NAME>. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
from ._signal import SignalBase as _SignalBase
class MovingAverage(_SignalBase):
def __init__(self, ticker: str, buffer_size: int = 1000):
super().__init__(ticker, buffer_size)
def _get_average(self, arr):
price = 0
for a in arr:
price += ((1.0 * (a.open + a.close + a.high + a.low))/ 4.0)
if len(arr) == 0:
return 0.0
else:
return (1.0 * price) / len(arr)
def process_bar_wrapper(self, bar) -> float:
self.buffer.add(bar)
if self.buffer.ready():
arr = self.buffer.get()
return self._get_average(arr)
return 0.0 | en | 0.916595 | # -*- coding: utf-8 -*- # Copyright © 2020 <NAME>. All rights reserved. # # Use of this source code is governed by a BSD-3-clause license that can # be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause | 2.009388 | 2 |
design-helper/temple_plates/views.py | cstorey/rustbucks | 0 | 6612320 | <filename>design-helper/temple_plates/views.py
from pyramid.view import view_config
@view_config(route_name='menu', renderer='templates/menu.jinja2')
def menu(request):
    """Render the drinks menu page; the template needs no context data."""
    return {}
@view_config(route_name='drink', renderer='templates/drink.jinja2')
def drink(request):
    """Render the detail page for one drink.

    The drink is identified by the ``id`` segment of the matched route
    and is handed to the template as ``drink_id``.
    """
    drink_id=request.matchdict['id']
    return dict(drink_id=drink_id)
@view_config(route_name='new_order', renderer='templates/order.jinja2')
def new_order(request):
    """Render the order page for the drink named in the request.

    Expects a ``drink_id`` request parameter (query string or form field);
    a missing parameter raises ``KeyError``, which Pyramid turns into a
    server error.
    """
    # Removed leftover debug print() calls that dumped request.matchdict and
    # request.params to stdout on every request.
    drink_id = request.params['drink_id']
    return dict(drink_id=drink_id)
from pyramid.view import view_config
@view_config(route_name='menu', renderer='templates/menu.jinja2')
def menu(request):
return {}
@view_config(route_name='drink', renderer='templates/drink.jinja2')
def drink(request):
drink_id=request.matchdict['id']
return dict(drink_id=drink_id)
@view_config(route_name='new_order', renderer='templates/order.jinja2')
def new_order(request):
print(('matches', request.matchdict))
print(('params', request.params))
drink_id=request.params['drink_id']
return dict(drink_id=drink_id) | none | 1 | 2.315742 | 2 | |
simulation/models/utilities.py | BenLatham/Agricultural-Simulation | 0 | 6612321 | def save_get(self, records, model, scenario):
save_all(records, model)
return get_all(model, scenario)
def save_all(records, model):
    """Validate every record, then bulk-insert the batch.

    ``full_clean()`` is run on each record first so a validation error
    aborts the whole batch before anything touches the database; the
    insert itself happens in a single ``bulk_create`` query.
    """
    for rec in records:
        rec.full_clean()
    model.objects.bulk_create(records)
def get_all(model, scenario):
    """
    :param model: a database model with fields scenario and name which are
        unique together
    :param scenario: the simulation scenario to fetch records for
    :return: a dictionary mapping each matching record's name to the record
        itself
    """
    return {rec.name: rec for rec in model.objects.filter(scenario=scenario)}
save_all(records, model)
return get_all(model, scenario)
def save_all(records, model):
for record in records:
record.full_clean() # call full clean on each record
model.objects.bulk_create(records) # save to the database
def get_all(model, scenario):
"""
:param model: a database model with fields scenario and name which are unique together
:return: a dictionary of the fields of the given model corresponding to the current simulation,
with their name fields as key.
"""
records = model.objects.filter(scenario=scenario)
return {record.name: record for record in records} | en | 0.890637 | # call full clean on each record # save to the database :param model: a database model with fields scenario and name which are unique together :return: a dictionary of the fields of the given model corresponding to the current simulation, with their name fields as key. | 2.808058 | 3 |
tests/test_orca_grid.py | cournape/orca-py | 0 | 6612322 | import unittest
from hypothesis import given
from hypothesis.strategies import characters, sampled_from
from orca.grid import (
GLYPH_TABLE,
DOT_GLYPH,
BANG_GLYPH,
glyph_to_value,
OrcaGrid,
)
from orca.ports import InputPort
# This contains the list of glyph that will give back a value.
#
# We order to have a stable ordered list, but we actually don't care about the
# exact order
GLYPH_WITH_VALUE = sorted(set(GLYPH_TABLE) | set(c.upper() for c in GLYPH_TABLE))
SPECIAL_GLYPHS = [DOT_GLYPH, BANG_GLYPH, None, ""]
@given(sampled_from(GLYPH_WITH_VALUE))
def test_glyph_to_value_acceptable(c):
value = glyph_to_value(c)
assert value >= 0
assert value < 36
@given(sampled_from(GLYPH_WITH_VALUE))
def test_glyph_to_value_upper_lower(c):
assert glyph_to_value(c.lower()) == glyph_to_value(c.upper())
@given(characters())
def test_glyph_to_value_unexpected(c):
value = glyph_to_value(c)
assert value >= -1
assert value < 36
@given(sampled_from(SPECIAL_GLYPHS))
def test_glyph_to_value_special(c):
assert glyph_to_value(c) == 0
def test_create_grid_from_string_simple(self):
# Given
s = "...\n..."
# When
grid = OrcaGrid.from_string(s)
# Then
assert grid.rows == 2
assert grid.cols == 3
def test_create_grid_from_string_simple():
# Given
s = "...\n..."
# When
grid = OrcaGrid.from_string(s)
# Then
assert grid.rows == 2
assert grid.cols == 3
O = OrcaGrid.from_string
class TestGridPeekAndPoke(unittest.TestCase):
def test_peek_in_bounds(self):
# Given
grid = O(".A.\n...")
# When
glyph = grid.peek(0, 0)
# Then
assert glyph == DOT_GLYPH
# When
glyph = grid.peek(1, 0)
# Then
assert glyph == "A"
def test_peek_out_of_bounds(self):
# Given
grid = O(".A.\n...")
# When
glyph = grid.peek(4, 4)
# Then
assert glyph is None
def test_poke(self):
# Given
grid = O(".A.\n...")
# When
glyph = grid.peek(1, 1)
# Then
assert glyph == DOT_GLYPH
# When
grid.poke(1, 1, "3")
glyph = grid.peek(1, 1)
# Then
assert glyph == "3"
def test_poke_out_bounds(self):
# Given
grid = O(".A.\n...")
# When
grid.poke(3, 3, "3")
glyph = grid.peek(3, 3)
# Then
assert glyph is None
class TestGridCompatLayer(unittest.TestCase):
def test_glyph_at(self):
# Given
grid = O(".A.\n...")
# When
glyph = grid.glyph_at(0, 0)
# Then
assert glyph == DOT_GLYPH
# When
glyph = grid.glyph_at(1, 0)
# Then
assert glyph == "A"
@given(sampled_from(SPECIAL_GLYPHS))
def test_value_of(self, c):
# Given
grid = O(".")
# When
value = grid.value_of(c)
# Then
assert value == 0
def test_value_at(self):
# Given
grid = O("3")
# When
value = grid.value_at(0, 0)
# Then
assert value == 3
# When
value = grid.value_at(0, 3)
# Then
assert value == 0
# When
value = grid.value_at(-2, 1)
# Then
assert value == 0
class TestGridLook(unittest.TestCase):
def test_lock(self):
# Given
grid = O(".A.\n*.c")
# When
value = grid.lock(0, 1)
# Then
assert grid.is_locked(0, 1) is True
assert grid.is_locked(0, 0) is False
# When
value = grid.reset_locks()
# Then
for x in range(grid.cols):
for y in range(grid.rows):
assert grid.is_locked(x, y) is False
class TestGridListen(unittest.TestCase):
def test_listen_default(self):
# Given
grid = O(".A.\n*.c")
# When
port = InputPort(0, 0)
value = grid.listen(port)
# Then
assert value == DOT_GLYPH
# When
port = InputPort(0, 1)
value = grid.listen(port)
# Then
assert value == BANG_GLYPH
# When
# we listen to a dot port with a default value
port = InputPort(0, 0, default="+")
value = grid.listen(port)
# Then
# we get the default value
assert value == "+"
# When
# we listen to a bang port with a default value
port = InputPort(0, 1, default="+")
value = grid.listen(port)
# Then
# we get the default value
assert value == "+"
# When
port = InputPort(1, 0, default="+")
value = grid.listen(port)
# Then
assert value == "A"
# When
port = InputPort(3, 3, default="+")
value = grid.listen(port)
# Then
assert value is None
def test_listen_as_value_default(self):
# Given
grid = O(".A.\n*.c")
# When
port = InputPort(0, 0)
value = grid.listen_as_value(port)
# Then
assert value == 0
# When
port = InputPort(0, 1)
value = grid.listen_as_value(port)
# Then
assert value == 0
# When
# we listen to a dot port with a default value
port = InputPort(0, 0, default="3")
value = grid.listen_as_value(port)
# Then
# we get the default value
assert value == 3
# When
# we listen to a bang port with a default value
port = InputPort(0, 1, default="4")
value = grid.listen_as_value(port)
# Then
# we get the default value
assert value == 4
# When
port = InputPort(1, 0, default="+")
value = grid.listen_as_value(port)
# Then
assert value == 10
# When
port = InputPort(3, 3, default="+")
value = grid.listen_as_value(port)
# Then
assert value == 0
| import unittest
from hypothesis import given
from hypothesis.strategies import characters, sampled_from
from orca.grid import (
GLYPH_TABLE,
DOT_GLYPH,
BANG_GLYPH,
glyph_to_value,
OrcaGrid,
)
from orca.ports import InputPort
# This contains the list of glyph that will give back a value.
#
# We order to have a stable ordered list, but we actually don't care about the
# exact order
GLYPH_WITH_VALUE = sorted(set(GLYPH_TABLE) | set(c.upper() for c in GLYPH_TABLE))
SPECIAL_GLYPHS = [DOT_GLYPH, BANG_GLYPH, None, ""]
@given(sampled_from(GLYPH_WITH_VALUE))
def test_glyph_to_value_acceptable(c):
value = glyph_to_value(c)
assert value >= 0
assert value < 36
@given(sampled_from(GLYPH_WITH_VALUE))
def test_glyph_to_value_upper_lower(c):
assert glyph_to_value(c.lower()) == glyph_to_value(c.upper())
@given(characters())
def test_glyph_to_value_unexpected(c):
value = glyph_to_value(c)
assert value >= -1
assert value < 36
@given(sampled_from(SPECIAL_GLYPHS))
def test_glyph_to_value_special(c):
assert glyph_to_value(c) == 0
def test_create_grid_from_string_simple(self):
# Given
s = "...\n..."
# When
grid = OrcaGrid.from_string(s)
# Then
assert grid.rows == 2
assert grid.cols == 3
def test_create_grid_from_string_simple():
# Given
s = "...\n..."
# When
grid = OrcaGrid.from_string(s)
# Then
assert grid.rows == 2
assert grid.cols == 3
O = OrcaGrid.from_string
class TestGridPeekAndPoke(unittest.TestCase):
def test_peek_in_bounds(self):
# Given
grid = O(".A.\n...")
# When
glyph = grid.peek(0, 0)
# Then
assert glyph == DOT_GLYPH
# When
glyph = grid.peek(1, 0)
# Then
assert glyph == "A"
def test_peek_out_of_bounds(self):
# Given
grid = O(".A.\n...")
# When
glyph = grid.peek(4, 4)
# Then
assert glyph is None
def test_poke(self):
# Given
grid = O(".A.\n...")
# When
glyph = grid.peek(1, 1)
# Then
assert glyph == DOT_GLYPH
# When
grid.poke(1, 1, "3")
glyph = grid.peek(1, 1)
# Then
assert glyph == "3"
def test_poke_out_bounds(self):
# Given
grid = O(".A.\n...")
# When
grid.poke(3, 3, "3")
glyph = grid.peek(3, 3)
# Then
assert glyph is None
class TestGridCompatLayer(unittest.TestCase):
def test_glyph_at(self):
# Given
grid = O(".A.\n...")
# When
glyph = grid.glyph_at(0, 0)
# Then
assert glyph == DOT_GLYPH
# When
glyph = grid.glyph_at(1, 0)
# Then
assert glyph == "A"
@given(sampled_from(SPECIAL_GLYPHS))
def test_value_of(self, c):
# Given
grid = O(".")
# When
value = grid.value_of(c)
# Then
assert value == 0
def test_value_at(self):
# Given
grid = O("3")
# When
value = grid.value_at(0, 0)
# Then
assert value == 3
# When
value = grid.value_at(0, 3)
# Then
assert value == 0
# When
value = grid.value_at(-2, 1)
# Then
assert value == 0
class TestGridLook(unittest.TestCase):
def test_lock(self):
# Given
grid = O(".A.\n*.c")
# When
value = grid.lock(0, 1)
# Then
assert grid.is_locked(0, 1) is True
assert grid.is_locked(0, 0) is False
# When
value = grid.reset_locks()
# Then
for x in range(grid.cols):
for y in range(grid.rows):
assert grid.is_locked(x, y) is False
class TestGridListen(unittest.TestCase):
def test_listen_default(self):
# Given
grid = O(".A.\n*.c")
# When
port = InputPort(0, 0)
value = grid.listen(port)
# Then
assert value == DOT_GLYPH
# When
port = InputPort(0, 1)
value = grid.listen(port)
# Then
assert value == BANG_GLYPH
# When
# we listen to a dot port with a default value
port = InputPort(0, 0, default="+")
value = grid.listen(port)
# Then
# we get the default value
assert value == "+"
# When
# we listen to a bang port with a default value
port = InputPort(0, 1, default="+")
value = grid.listen(port)
# Then
# we get the default value
assert value == "+"
# When
port = InputPort(1, 0, default="+")
value = grid.listen(port)
# Then
assert value == "A"
# When
port = InputPort(3, 3, default="+")
value = grid.listen(port)
# Then
assert value is None
def test_listen_as_value_default(self):
# Given
grid = O(".A.\n*.c")
# When
port = InputPort(0, 0)
value = grid.listen_as_value(port)
# Then
assert value == 0
# When
port = InputPort(0, 1)
value = grid.listen_as_value(port)
# Then
assert value == 0
# When
# we listen to a dot port with a default value
port = InputPort(0, 0, default="3")
value = grid.listen_as_value(port)
# Then
# we get the default value
assert value == 3
# When
# we listen to a bang port with a default value
port = InputPort(0, 1, default="4")
value = grid.listen_as_value(port)
# Then
# we get the default value
assert value == 4
# When
port = InputPort(1, 0, default="+")
value = grid.listen_as_value(port)
# Then
assert value == 10
# When
port = InputPort(3, 3, default="+")
value = grid.listen_as_value(port)
# Then
assert value == 0
| en | 0.551274 | # This contains the list of glyph that will give back a value. # # We order to have a stable ordered list, but we actually don't care about the # exact order # Given # When # Then # Given # When # Then # Given # When # Then # When # Then # Given # When # Then # Given # When # Then # When # Then # Given # When # Then # Given # When # Then # When # Then # Given # When # Then # Given # When # Then # When # Then # When # Then # Given # When # Then # When # Then # Given # When # Then # When # Then # When # we listen to a dot port with a default value # Then # we get the default value # When # we listen to a bang port with a default value # Then # we get the default value # When # Then # When # Then # Given # When # Then # When # Then # When # we listen to a dot port with a default value # Then # we get the default value # When # we listen to a bang port with a default value # Then # we get the default value # When # Then # When # Then | 2.75139 | 3 |
buffer.py | matbut/Biomechanical-Analysis-Of-Running | 0 | 6612323 | import random
from collections import deque
import numpy as np
class Buffer:
    """Fixed-capacity replay memory of (state, action, reward, next_state, done)."""

    def __init__(self):
        # A bounded deque silently evicts the oldest transition once
        # 2000 entries have been stored.
        self.memory = deque(maxlen=2000)

    def remember(self, cur_state, action, reward, new_state, done):
        """Append one transition to the replay memory."""
        self.memory.append([cur_state, action, reward, new_state, done])

    def get_random_batch(self, batch_size):
        """Sample *batch_size* transitions uniformly at random.

        Returns five parallel numpy arrays: states, actions, rewards,
        next states, and done flags.
        """
        sampled = random.sample(self.memory, batch_size)
        return [np.array([transition[field] for transition in sampled])
                for field in range(5)]
| import random
from collections import deque
import numpy as np
class Buffer:
def __init__(self):
self.memory = deque(maxlen=2000)
def remember(self, cur_state, action, reward, new_state, done):
self.memory.append([cur_state, action, reward, new_state, done])
def get_random_batch(self, batch_size):
batch = random.sample(self.memory, batch_size)
return [np.array([_[i] for _ in batch]) for i in range(5)]
| none | 1 | 2.955619 | 3 | |
KHK Light Show/KHK Light Show/2.x.x/2.0.0/Old/BeatDetector_old.py | KappaEtaKappa/Sound-To-Disco | 0 | 6612324 | ################################# BeatDetector #################################
# Author: <NAME>
#
# Description: The BeatDetector Class is responsible for storing and analyzing
# data necessary for beat detection. It only detects beats for
# one set of data, so, for instance, if you performed an FFT on an
# audio signal, separated the signal into several frequency bands,
# and then wanted to perform beat detection on each band
# simultaneously, then you would need to create a separate
# BeatDetector for each frequency band.
#
from scipy import *
class BeatDetector:
    """Beat detector for a single stream of audio-energy samples.

    Keeps a circular history of the instantaneous sound energy of past
    samples and flags a beat whenever the current energy exceeds
    ``triggerConstant * mean(history)``.  One instance handles one data
    stream (e.g. one frequency band of an FFT).

    Attributes:
        beatDetected     bool:   result of the most recent detectBeat() call
        triggerConstant  float:  multiplier applied to the history mean
        triggerCalc_A/B  float:  linear model C = A*V + B used when the
                                 trigger constant is computed dynamically
        dynamicTrigger   bool:   recompute triggerConstant from variance?
        energyLevel      float:  energy of the most recently analysed sample
        historyBuffer    list[float]: circular buffer of past energy levels;
                                 the newest value sits at pHistoryBuffer
        beatHistory      list[bool]: beat flags aligned with historyBuffer
        bufferSize       int:    capacity of the circular buffers
        pHistoryBuffer   int:    index of the newest stored sample
        pHistoryEnd      int:    index of the oldest sample included in
                                 history averaging
        dynamicHistory   bool:   adapt the averaging window size?

    Fixes over the original implementation:
      * ``_init_`` was misspelled so ``BeatDetector(...)`` never ran it;
        renamed to ``__init__`` (a ``_init_`` alias is kept for callers
        that invoked the old name explicitly).
      * Instance attributes are now consistently accessed through ``self``
        (the old methods raised NameError on bare names like
        ``pHistoryBuffer``).
      * ``getHistoryBuffer``/``getBeatHistory`` decremented an undefined
        ``p`` after assigning ``pStart``; the walk is now a shared helper.
      * ``triggerCalc`` is a proper method and computes an actual variance
        (mean squared deviation); the old sum of raw deviations is ~0 by
        construction.
      * Off-by-one loops ``range(0, n - 1)`` that skipped the last element
        are gone, and the scipy star-import is no longer needed.
    """

    def __init__(self, dynamicTrigger, triggerConst, triggerCalc_A,
                 triggerCalc_B, dynamicHistory, bufferSize):
        """Create a detector.

        If dynamicTrigger is False, triggerConst is used as-is; otherwise
        triggerCalc_A and triggerCalc_B drive the dynamic C = A*V + B model.
        """
        self.beatDetected = False
        self.triggerConstant = triggerConst
        self.triggerCalc_A = triggerCalc_A
        self.triggerCalc_B = triggerCalc_B
        self.dynamicTrigger = dynamicTrigger
        self.energyLevel = 0
        self.bufferSize = bufferSize
        self.historyBuffer = [0.0] * bufferSize
        self.beatHistory = [False] * bufferSize
        self.pHistoryBuffer = 0
        self.pHistoryEnd = 0
        self.dynamicHistory = dynamicHistory

    # Backward-compatibility alias for the original misspelled constructor.
    _init_ = __init__

    def _newest_first(self, data, count):
        """Return *count* entries of *data*, newest first, walking the
        circular buffer backwards from pHistoryBuffer."""
        out = []
        p = self.pHistoryBuffer
        for _ in range(count):
            out.append(data[p])
            p -= 1
            if p < 0:
                p = self.bufferSize - 1
        return out

    def getHistoryBuffer(self):
        """History window used for the last beat decision, newest first."""
        return self._newest_first(self.historyBuffer, self.getBufferSize())

    def getHistoryBuffer_Full(self):
        """Every stored energy sample, newest first."""
        return self._newest_first(self.historyBuffer, self.bufferSize)

    def getBeatHistory(self):
        """Beat flags aligned one-to-one with getHistoryBuffer()."""
        return self._newest_first(self.beatHistory, self.getBufferSize())

    def getBeatHistory_Full(self):
        """Beat flags aligned one-to-one with getHistoryBuffer_Full()."""
        return self._newest_first(self.beatHistory, self.bufferSize)

    def getTriggerConstant(self):
        """Last trigger constant used, whether static or dynamic."""
        return self.triggerConstant

    def getBufferSize(self):
        """Size of the history window currently used for averaging."""
        return abs(self.pHistoryEnd - self.pHistoryBuffer) + 1

    def getBufferSize_Full(self):
        """Total capacity of the circular history buffer."""
        return self.bufferSize

    def isDynamicTrigger(self):
        """True if the trigger constant is recomputed each sample."""
        return self.dynamicTrigger

    def isDynamicHistory(self):
        """True if the averaging window size is adapted dynamically."""
        return self.dynamicHistory

    def detectBeat(self, audioSample):
        """Analyse one sample and report whether it contains a beat.

        audioSample is an iterable of signed amplitude values; its energy
        is the sum of absolute amplitudes.  A beat is flagged when that
        energy exceeds triggerConstant times the mean of the current
        history window.  The sample is then pushed into the history.
        """
        self.energyLevel = sum(abs(s) for s in audioSample)
        history = self.getHistoryBuffer()
        if self.dynamicTrigger:
            self.triggerConstant = self.triggerCalc(history)
        mean = sum(history) / len(history) if history else 0.0
        self.beatDetected = self.energyLevel > self.triggerConstant * mean
        # Advance the write pointer first so the newest sample really is
        # stored at pHistoryBuffer, as the attribute docs promise.
        self.pHistoryBuffer = (self.pHistoryBuffer + 1) % self.bufferSize
        self.pHistoryEnd = (self.pHistoryEnd + 1) % self.bufferSize
        self.historyBuffer[self.pHistoryBuffer] = self.energyLevel
        self.beatHistory[self.pHistoryBuffer] = self.beatDetected
        if self.dynamicHistory:
            self.historySizeCalc()
        return self.beatDetected

    def historySizeCalc(self):
        """Adapt the averaging window from recent beat activity.

        Placeholder: marked UNFINISHED in the original source; currently a
        deliberate no-op.
        """
        pass

    def triggerCalc(self, history):
        """Return the dynamic trigger constant for *history*.

        Computes the variance V of the history (mean squared deviation)
        and feeds it through the linear model C = A*V + B configured at
        construction time.
        """
        if not history:
            return self.triggerCalc_B
        mean = sum(history) / len(history)
        variance = sum((h - mean) ** 2 for h in history) / len(history)
        return self.triggerCalc_A * variance + self.triggerCalc_B
# Author: <NAME>
#
# Description: The BeatDetector Class is responsible for storing and analyzing
# data necessary for beat detection. It only detects beats for
# one set of data, so, for instance, if you performed an FFT on an
# audio signal, separated the signal into several frequency bands,
# and then wanted to perform beat detection on each band
# simultaneously, then you would need to create a separate
# BeatDetector for each frequency band.
#
from scipy import *
class BeatDetector:
##### Instance Variables #####
#beatDetected; # boolean: True if beat was detected
#triggerConstant; # float: Constant used for comparison of energyLevel to
# historyBuffer
#triggerCalc_A # float: Constant used for triggerConstant generation
# from equation: C = AV+B.
#triggerCalc_B # float: Constant used for triggerConstant generation
# from equation: C = AV+B.
#dynamicTrigger # boolean: True if triggerConstant should be calculated
# dynamically using variance and a linear
# regression.
#energyLevel; # float: Intensity of the sample last analyzed.
#historyBuffer; # float[]: bufferSize past energyLevels. Most Recent
# is at pHistoryBuffer.
#beatHistory; # boolean[]: Past beatDetecteds aligned
# with historyBuffer
#bufferSize; # int: Total size of the historyBuffer.
#pHistoryBuffer; # int: Starting location in historyBuffer Array
#pHistoryEnd; # int: Last value that should be included in history
# averaging.
#dynamicHistory; # boolean: True if number of samples for historyBuffer
# averaging should be calculated dynamically.
##### Constructors #####
# __init__
#
# Default constructor. For parameter descriptions, see above.
# If dynamicTrigger = False, then triggerCalc A & B must be specified.
# Otherwise, triggerConst must be specified.
#
# parameters: dynamicTrigger - boolean
# triggerConst - double
# triggerCalc_A - double
# triggerCalc_B - double
# dynamicHistory - boolean
# bufferSize - int
#
def _init_(self, dynamicTrigger, triggerConst, triggerCalc_A, triggerCalc_B,
dynamicHistory, bufferSize):
self.beatDetected = False;
self.triggerConstant = triggerConst;
self.triggerCalc_A = triggerCalc_A;
self.triggerCalc_B = triggerCalc_B;
self.dynamicTrigger = dynamicTrigger;
self.energyLevel = 0;
self.bufferSize = bufferSize;
self.historyBuffer = zeros(bufferSize);
self.beatHistory = zeros(bufferSize);
self.pHistoryBuffer = 0;
self.pHistoryEnd = 0;
self.dynamicHistory = dynamicHistory;
##### Methods #####
# getHistoryBuffer(self)
#
# Author: <NAME>
#
# Description: returns the historyBuffer used to calculate last beatDetect.
#
# Parameters: na
#
# Modifies: none
#
# Returns: An array representing the History Buffer used for
# calculations. The most recent value is stored at location
# 0.
#
def getHistoryBuffer(self):
a = zeros(self.getBufferSize());
pStart = pHistoryBuffer;
for i in range(0, len(a)-1):
a[i] = self.historyBuffer[pStart];
p = p - 1;
if(p < 0):
p = bufferSize - 1;
return a;
# getHistoryBuffer_Full(self)
#
# Author: <NAME>
#
# Description: returns the entire historyBuffer
#
# Parameters: na
#
# Modifies: none
#
# Returns: An array containing every stored sample in History. The
# most recent value is stored at location 0.
#
def getHistoryBuffer_Full(self):
a = zeros(self.bufferSize);
p = pHistoryBuffer;
for i in range(0, bufferSize-1):
a[i] = historyBuffer[p];
p = p - 1;
if(p < 0):
p = bufferSize - 1;
return a;
# getBeatHistory(self)
#
# Author: <NAME>
#
# Description: returns the beatHistory corresponding to the array returned
# by getHistoryBuffer(self).
#
# Parameters: na
#
# Modifies: none
#
# Returns: An array containing booleans representing beats. One-to-one
# correspondance to the array returned by
# getHistoryBuffer(self).
#
def getBeatHistory(self):
a = zeros(self.getBufferSize());
pStart = pHistoryBuffer;
for i in range(0, len(a)-1):
a[i] = self.beatHistory[pStart];
p = p - 1;
if(p < 0):
p = bufferSize - 1;
return a;
# getBeatHistory_Full(self)
#
# Author: <NAME>
#
# Description: returns the beatHistory corresponding to the array returned
# by getHistoryBuffer_Full(self).
#
# Parameters: na
#
# Modifies: none
#
# Returns: An array containing booleans representing beats. One-to-one
# correspondance to the array returned by
# getHistoryBuffer_Full(self).
#
def getBeatHistory_Full(self):
a = zeros(self.bufferSize);
p = pHistoryBuffer;
for i in range(0, bufferSize-1):
a[i] = beatHistory[p];
p = p - 1;
if(p < 0):
p = bufferSize - 1;
return a;
# gettriggerConstant(self)
#
# Author: <NAME>
#
# Description: returns the last triggerConstant used. Be it dynamic or
# static.
#
# Parameters: na
#
# Modifies: none
#
# Returns: A number indicating the triggerConstant last used.
#
def getTriggerConstant(self):
return self.triggerConstant;
# getBufferSize(self)
#
# Author: <NAME>
#
# Description: Returns the size of the part of the historyBuffer last used
# for calculations.
#
# Parameters: na
#
# Modifies: none
#
# Returns: A number indicating the size of the historyBuffer last used.
#
def getBufferSize(self):
return abs(self.pHistoryEnd - self.pHistoryBuffer) + 1;
# getBufferCalcSize(self)
#
# Author: <NAME>
#
# Description: Returns the size of the entire historyBuffer.
#
# Parameters: na
#
# Modifies: none
#
# Returns: A number indicating the size of the full historyBuffer.
#
def getBufferSize_Full(self):
return self.bufferSize;
# isDynamicTrigger(self)
#
# Author: <NAME>
#
# Description: Returns a boolean representing if the TriggerConstant is
# being calculated dynamically. This value is specified at
# object construction and should not be changed.
#
# Parameters: na
#
# Modifies: none
#
# Returns: boolean representing if the TriggerConstant is being
# calculated dynamically.
#
def isDynamicTrigger(self):
return self.dynamicTrigger;
# isDynamicTrigger(self)
#
# Author: <NAME>
#
# Description: Returns a boolean representing if the bufferSize is
# being calculated dynamically. This value is specified at
# object construction and should not be changed.
#
# Parameters: na
#
# Modifies: none
#
# Returns: boolean representing if the bufferSize is being
# calculated dynamically.
#
def isDynamicHistory(self):
return self.dynamicHistory;
# detectBeat(self, audioSample)
#
# Author: <NAME>
#
# Description: Returns a boolean representing if the audioSample given
# represents a beat.
#
# Parameters: audioSample - Array of values representing audio intensity.
#
# Modifies: energyLevel
# beatDetected
# historyBuffer
# beatHistory
# triggerConstant (if dynamicTrigger = True)
# pHistoryBuffer
#
# Returns: boolean representing if a beat was detected.
#
def detectBeat(self, audioSample):
# Calculate instant sound energy
energyLevel = sum(abs(audioSample));
#Compute triggerLevel
if(dynamicTrigger):
triggerConstant = triggerCalc(self.getHistoryBuffer());
# Check for beat
if energyLevel > triggerConstant * average(self.getHistoryBuffer()):
beatDetected = True;
else:
beatDetected = False;
# Update History Buffer
historyBuffer[pHistoryBuffer] = energyLevel;
beatHistory[pHistoryBuffer] = beatDetected;
pHistoryBuffer = pHistoryBuffer + 1;
pHistoryEnd = pHistoryEnd + 1;
if(pHistoryBuffer == bufferSize):
pHistoryBuffer = 0;
if(pHistoryEnd == bufferSize):
pHistoryEnd = 0;
if(dynamicHistory):
self.historySizeCalc();
# Return and Exit
return beatDetected;
# historySizeCalc(self) #####################UNFINISHED#####################
#
# Author: <NAME>
#
# Description: Analyzes the Beat History, and lengthens or shortens the
# historyBuffer accordingly.
#
# Parameters: none
#
# Modifies: pHistoryEnd
#
# Returns: none
#
def historySizeCalc(self):
pass
# detectBeat(history)
#
# Author: <NAME>
#
# Description: Calculates a triggerConstant from the history given. The
# calculation is done based on variance. The variance is
# calculated across the history and is then entered into a
# linear regression model given by the constants A & B.
# These values are specified during object creation and should
# not be modified.
#
# Parameters: history - Array of values for variance calculation
#
# Modifies: none
#
# Returns: Value of proper triggerConstant for the given history.
#
def triggerCalc(history):
#Compute Variance
v = 0;
for a in range(0, len(history)-1):
v += history[a] - average(history);
v = v / len(history);
#Compute triggerLevel
triggerLevel = triggerCalc_A * v + triggerCalc_B;
return triggerLevel; | en | 0.72413 | ################################# BeatDetector ################################# # Author: <NAME> # # Description: The BeatDetector Class is responsible for storing and analyzing # data necessary for beat detection. It only detects beats for # one set of data, so, for instance, if you performed an FFT on an # audio signal, separated the signal into several frequency bands, # and then wanted to perform beat detection on each band # simultaneously, then you would need to create a separate # BeatDetector for each frequency band. # ##### Instance Variables ##### #beatDetected; # boolean: True if beat was detected #triggerConstant; # float: Constant used for comparison of energyLevel to # historyBuffer #triggerCalc_A # float: Constant used for triggerConstant generation # from equation: C = AV+B. #triggerCalc_B # float: Constant used for triggerConstant generation # from equation: C = AV+B. #dynamicTrigger # boolean: True if triggerConstant should be calculated # dynamically using variance and a linear # regression. #energyLevel; # float: Intensity of the sample last analyzed. #historyBuffer; # float[]: bufferSize past energyLevels. Most Recent # is at pHistoryBuffer. #beatHistory; # boolean[]: Past beatDetecteds aligned # with historyBuffer #bufferSize; # int: Total size of the historyBuffer. #pHistoryBuffer; # int: Starting location in historyBuffer Array #pHistoryEnd; # int: Last value that should be included in history # averaging. #dynamicHistory; # boolean: True if number of samples for historyBuffer # averaging should be calculated dynamically. ##### Constructors ##### # __init__ # # Default constructor. For parameter descriptions, see above. # If dynamicTrigger = False, then triggerCalc A & B must be specified. # Otherwise, triggerConst must be specified. 
# # parameters: dynamicTrigger - boolean # triggerConst - double # triggerCalc_A - double # triggerCalc_B - double # dynamicHistory - boolean # bufferSize - int # ##### Methods ##### # getHistoryBuffer(self) # # Author: <NAME> # # Description: returns the historyBuffer used to calculate last beatDetect. # # Parameters: na # # Modifies: none # # Returns: An array representing the History Buffer used for # calculations. The most recent value is stored at location # 0. # # getHistoryBuffer_Full(self) # # Author: <NAME> # # Description: returns the entire historyBuffer # # Parameters: na # # Modifies: none # # Returns: An array containing every stored sample in History. The # most recent value is stored at location 0. # # getBeatHistory(self) # # Author: <NAME> # # Description: returns the beatHistory corresponding to the array returned # by getHistoryBuffer(self). # # Parameters: na # # Modifies: none # # Returns: An array containing booleans representing beats. One-to-one # correspondance to the array returned by # getHistoryBuffer(self). # # getBeatHistory_Full(self) # # Author: <NAME> # # Description: returns the beatHistory corresponding to the array returned # by getHistoryBuffer_Full(self). # # Parameters: na # # Modifies: none # # Returns: An array containing booleans representing beats. One-to-one # correspondance to the array returned by # getHistoryBuffer_Full(self). # # gettriggerConstant(self) # # Author: <NAME> # # Description: returns the last triggerConstant used. Be it dynamic or # static. # # Parameters: na # # Modifies: none # # Returns: A number indicating the triggerConstant last used. # # getBufferSize(self) # # Author: <NAME> # # Description: Returns the size of the part of the historyBuffer last used # for calculations. # # Parameters: na # # Modifies: none # # Returns: A number indicating the size of the historyBuffer last used. # # getBufferCalcSize(self) # # Author: <NAME> # # Description: Returns the size of the entire historyBuffer. 
# # Parameters: na # # Modifies: none # # Returns: A number indicating the size of the full historyBuffer. # # isDynamicTrigger(self) # # Author: <NAME> # # Description: Returns a boolean representing if the TriggerConstant is # being calculated dynamically. This value is specified at # object construction and should not be changed. # # Parameters: na # # Modifies: none # # Returns: boolean representing if the TriggerConstant is being # calculated dynamically. # # isDynamicTrigger(self) # # Author: <NAME> # # Description: Returns a boolean representing if the bufferSize is # being calculated dynamically. This value is specified at # object construction and should not be changed. # # Parameters: na # # Modifies: none # # Returns: boolean representing if the bufferSize is being # calculated dynamically. # # detectBeat(self, audioSample) # # Author: <NAME> # # Description: Returns a boolean representing if the audioSample given # represents a beat. # # Parameters: audioSample - Array of values representing audio intensity. # # Modifies: energyLevel # beatDetected # historyBuffer # beatHistory # triggerConstant (if dynamicTrigger = True) # pHistoryBuffer # # Returns: boolean representing if a beat was detected. # # Calculate instant sound energy #Compute triggerLevel # Check for beat # Update History Buffer # Return and Exit # historySizeCalc(self) #####################UNFINISHED##################### # # Author: <NAME> # # Description: Analyzes the Beat History, and lengthens or shortens the # historyBuffer accordingly. # # Parameters: none # # Modifies: pHistoryEnd # # Returns: none # # detectBeat(history) # # Author: <NAME> # # Description: Calculates a triggerConstant from the history given. The # calculation is done based on variance. The variance is # calculated across the history and is then entered into a # linear regression model given by the constants A & B. # These values are specified during object creation and should # not be modified. 
# # Parameters: history - Array of values for variance calculation # # Modifies: none # # Returns: Value of proper triggerConstant for the given history. # #Compute Variance #Compute triggerLevel | 2.9188 | 3 |
pro/tests.py | ckelly/django-paypal | 1 | 6612325 | IPN_TEST = {
'notify_version ': '2.4',
'last_name': 'Smith',
'receiver_email': '<EMAIL>',
'payment_status': '2',
'mc_fee': '0.44',
'tax': '2.02',
'parent_txn_id ': '',
'item_name1': 'something',
'residence_country': '182',
'invoice': 'abc1234',
'address_state': 'CA',
'payer_status': '0',
'txn_type': 'cart',
'address_street': '123, any street',
'quantity1': '1',
'payment_date': '22:56:14 Feb. 02, 2009 PST',
'first_name': 'John',
'item_number1': 'AK-1234',
'item_name': '',
'address_country': '182',
'ipn_type': '4',
'mc_gross1': '9.34',
'custom': 'xyz123',
'for_auction': '',
'address_name': '<NAME>',
'pending_reason': '',
'item_number': '',
'receiver_id': 'TESTSELLERID1',
'reason_code': '',
'business': '',
'txn_id ': '1423656',
'payer_id': 'TESTBUYERID01',
'mc_handling1': '1.67',
'notify_url': 'http://216.19.180.83:8000/ipn/',
'auction_closing_date': '',
'mc_handling': '2.06',
'auction_buyer_id': '',
'address_zip': '95131',
'address_country_code': '182',
'address_city': 'San Jose',
'address_status': '1',
'mc_shipping': '3.02',
'cmd': '_send_ipn-session',
'mc_currency': '15',
'shipping': '',
'payer_email': '<EMAIL>',
'payment_type': '1',
'receipt_ID': '',
'mc_gross': '',
'mc_shipping1': '1.02',
'quantity': ''
}
| IPN_TEST = {
'notify_version ': '2.4',
'last_name': 'Smith',
'receiver_email': '<EMAIL>',
'payment_status': '2',
'mc_fee': '0.44',
'tax': '2.02',
'parent_txn_id ': '',
'item_name1': 'something',
'residence_country': '182',
'invoice': 'abc1234',
'address_state': 'CA',
'payer_status': '0',
'txn_type': 'cart',
'address_street': '123, any street',
'quantity1': '1',
'payment_date': '22:56:14 Feb. 02, 2009 PST',
'first_name': 'John',
'item_number1': 'AK-1234',
'item_name': '',
'address_country': '182',
'ipn_type': '4',
'mc_gross1': '9.34',
'custom': 'xyz123',
'for_auction': '',
'address_name': '<NAME>',
'pending_reason': '',
'item_number': '',
'receiver_id': 'TESTSELLERID1',
'reason_code': '',
'business': '',
'txn_id ': '1423656',
'payer_id': 'TESTBUYERID01',
'mc_handling1': '1.67',
'notify_url': 'http://216.19.180.83:8000/ipn/',
'auction_closing_date': '',
'mc_handling': '2.06',
'auction_buyer_id': '',
'address_zip': '95131',
'address_country_code': '182',
'address_city': 'San Jose',
'address_status': '1',
'mc_shipping': '3.02',
'cmd': '_send_ipn-session',
'mc_currency': '15',
'shipping': '',
'payer_email': '<EMAIL>',
'payment_type': '1',
'receipt_ID': '',
'mc_gross': '',
'mc_shipping1': '1.02',
'quantity': ''
}
| none | 1 | 1.124762 | 1 | |
backend/submission/tests.py | skku-npc/SKKU_Coding_Platform | 1 | 6612326 | from copy import deepcopy
from unittest import mock
from problem.models import Problem, ProblemTag
from problem.tests import DEFAULT_PROBLEM_DATA
from course.models import Course, Registration
from course.tests import DEFAULT_COURSE_DATA
from assignment.models import Assignment
from assignment.tests import DEFAULT_ASSIGNMENT_DATA
from utils.api.tests import APITestCase
from .models import Submission
DEFAULT_SUBMISSION_DATA = {
"problem_id": "1",
"user_id": 1,
"username": "test",
"code": "xxxxxxxxxxxxxx",
"result": -2,
"info": {},
"language": "C",
"statistic_info": {"score": 0, "err_info": "test"}
}
# todo contest submission
class SubmissionPrepare(APITestCase):
def _create_problem_and_submission(self):
user = self.create_admin("test", "<PASSWORD>", login=False)
problem_data = deepcopy(DEFAULT_PROBLEM_DATA)
tags = problem_data.pop("tags")
problem_data["created_by"] = user
self.problem = Problem.objects.create(**problem_data)
for tag in tags:
tag = ProblemTag.objects.create(name=tag)
self.problem.tags.add(tag)
self.problem.save()
self.submission_data = deepcopy(DEFAULT_SUBMISSION_DATA)
self.submission_data["problem_id"] = self.problem.id
self.submission = Submission.objects.create(**self.submission_data)
def _create_assignment_submission(self):
professor = self.create_admin()
self.course_id = Course.objects.create(created_by=professor, **DEFAULT_COURSE_DATA).id
assignment_data = deepcopy(DEFAULT_ASSIGNMENT_DATA)
assignment_data["course_id"] = self.course_id
self.assignment_id = Assignment.objects.create(created_by=professor, **assignment_data).id
self.problem_data = deepcopy(DEFAULT_PROBLEM_DATA)
self.problem_data["assignment_id"] = self.assignment_id
self.problem = self.client.post(self.reverse("assignment_problem_professor_api"), data=self.problem_data).data["data"]
self.submission_data = deepcopy(DEFAULT_SUBMISSION_DATA)
self.submission_data["problem_id"] = self.problem["id"]
self.submission_data["assignment_id"] = self.assignment_id
self.submission = Submission.objects.create(**self.submission_data)
class SubmissionListTest(SubmissionPrepare):
def setUp(self):
self._create_problem_and_submission()
self.create_user("123", "345")
self.url = self.reverse("submission_list_api")
def test_get_submission_list(self):
resp = self.client.get(self.url, data={"limit": "10"})
self.assertSuccess(resp)
@mock.patch("judge.tasks.judge_task.send")
class SubmissionAPITest(SubmissionPrepare):
def setUp(self):
self.url = self.reverse("submission_api")
def test_create_submission(self, judge_task):
self._create_problem_and_submission()
self.create_user("123", "test123")
resp = self.client.post(self.url, self.submission_data)
self.assertSuccess(resp)
judge_task.assert_called()
def test_create_assignment_submission(self, judge_task):
self._create_assignment_submission()
student = self.create_user("123", "test123")
Registration.objects.create(user_id=student.id, course_id=self.course_id)
resp = self.client.post(self.url, self.submission_data)
self.assertSuccess(resp)
judge_task.assert_called()
def test_create_submission_with_wrong_language(self, judge_task):
self._create_problem_and_submission()
self.create_user("123", "test123")
self.submission_data.update({"language": "Python3"})
resp = self.client.post(self.url, self.submission_data)
self.assertFailed(resp)
self.assertDictEqual(resp.data, {"error": "error",
"data": "Python3 is now allowed in the problem"})
judge_task.assert_not_called()
class AssignmentSubmissionListTest(SubmissionPrepare):
def setUp(self):
self._create_assignment_submission()
self.url = self.reverse("assignment_submission_list_api")
def test_get_assignment_submission_list(self):
problem_id = self.problem["_id"]
resp = self.client.get(f"{self.url}?assignment_id={self.assignment_id}&problem_id={problem_id}")
self.assertSuccess(resp)
def test_get_student_assignment_submission_list(self):
student = self.create_user("2020123123", "123")
Registration.objects.create(user_id=student.id, course_id=self.course_id)
self.submission_data["user_id"] = student.id
self.submission_data["username"] = student.username
Submission.objects.create(**self.submission_data)
problem_id = self.problem["_id"]
resp = self.client.get(f"{self.url}?assignment_id={self.assignment_id}&problem_id={problem_id}")
self.assertSuccess(resp)
class AssignmentSubmissionListProfessorTest(SubmissionPrepare):
def setUp(self):
self._create_assignment_submission()
self.url = self.reverse("assignment_submission_list_professor_api")
def test_get_assignment_submission_list_professor(self):
assignment_id = self.assignment_id
problem_id = self.problem["id"]
resp = self.client.get(f"{self.url}?assignment_id={assignment_id}&problem_id={problem_id}")
self.assertSuccess(resp)
class EditSubmissionScoreTest(SubmissionPrepare):
def setUp(self):
self._create_assignment_submission()
self.url = self.reverse("edit_submission_score_api")
def test_edit_submission_score(self):
submission_id = self.submission.id
data = {"id": submission_id, "score": 100}
resp = self.client.put(self.url, data=data)
self.assertSuccess(resp)
resp_data = resp.data["data"]
self.assertEqual(resp_data["statistic_info"]["score"], 100)
| from copy import deepcopy
from unittest import mock
from problem.models import Problem, ProblemTag
from problem.tests import DEFAULT_PROBLEM_DATA
from course.models import Course, Registration
from course.tests import DEFAULT_COURSE_DATA
from assignment.models import Assignment
from assignment.tests import DEFAULT_ASSIGNMENT_DATA
from utils.api.tests import APITestCase
from .models import Submission
DEFAULT_SUBMISSION_DATA = {
"problem_id": "1",
"user_id": 1,
"username": "test",
"code": "xxxxxxxxxxxxxx",
"result": -2,
"info": {},
"language": "C",
"statistic_info": {"score": 0, "err_info": "test"}
}
# todo contest submission
class SubmissionPrepare(APITestCase):
def _create_problem_and_submission(self):
user = self.create_admin("test", "<PASSWORD>", login=False)
problem_data = deepcopy(DEFAULT_PROBLEM_DATA)
tags = problem_data.pop("tags")
problem_data["created_by"] = user
self.problem = Problem.objects.create(**problem_data)
for tag in tags:
tag = ProblemTag.objects.create(name=tag)
self.problem.tags.add(tag)
self.problem.save()
self.submission_data = deepcopy(DEFAULT_SUBMISSION_DATA)
self.submission_data["problem_id"] = self.problem.id
self.submission = Submission.objects.create(**self.submission_data)
def _create_assignment_submission(self):
professor = self.create_admin()
self.course_id = Course.objects.create(created_by=professor, **DEFAULT_COURSE_DATA).id
assignment_data = deepcopy(DEFAULT_ASSIGNMENT_DATA)
assignment_data["course_id"] = self.course_id
self.assignment_id = Assignment.objects.create(created_by=professor, **assignment_data).id
self.problem_data = deepcopy(DEFAULT_PROBLEM_DATA)
self.problem_data["assignment_id"] = self.assignment_id
self.problem = self.client.post(self.reverse("assignment_problem_professor_api"), data=self.problem_data).data["data"]
self.submission_data = deepcopy(DEFAULT_SUBMISSION_DATA)
self.submission_data["problem_id"] = self.problem["id"]
self.submission_data["assignment_id"] = self.assignment_id
self.submission = Submission.objects.create(**self.submission_data)
class SubmissionListTest(SubmissionPrepare):
def setUp(self):
self._create_problem_and_submission()
self.create_user("123", "345")
self.url = self.reverse("submission_list_api")
def test_get_submission_list(self):
resp = self.client.get(self.url, data={"limit": "10"})
self.assertSuccess(resp)
@mock.patch("judge.tasks.judge_task.send")
class SubmissionAPITest(SubmissionPrepare):
def setUp(self):
self.url = self.reverse("submission_api")
def test_create_submission(self, judge_task):
self._create_problem_and_submission()
self.create_user("123", "test123")
resp = self.client.post(self.url, self.submission_data)
self.assertSuccess(resp)
judge_task.assert_called()
def test_create_assignment_submission(self, judge_task):
self._create_assignment_submission()
student = self.create_user("123", "test123")
Registration.objects.create(user_id=student.id, course_id=self.course_id)
resp = self.client.post(self.url, self.submission_data)
self.assertSuccess(resp)
judge_task.assert_called()
def test_create_submission_with_wrong_language(self, judge_task):
self._create_problem_and_submission()
self.create_user("123", "test123")
self.submission_data.update({"language": "Python3"})
resp = self.client.post(self.url, self.submission_data)
self.assertFailed(resp)
self.assertDictEqual(resp.data, {"error": "error",
"data": "Python3 is now allowed in the problem"})
judge_task.assert_not_called()
class AssignmentSubmissionListTest(SubmissionPrepare):
def setUp(self):
self._create_assignment_submission()
self.url = self.reverse("assignment_submission_list_api")
def test_get_assignment_submission_list(self):
problem_id = self.problem["_id"]
resp = self.client.get(f"{self.url}?assignment_id={self.assignment_id}&problem_id={problem_id}")
self.assertSuccess(resp)
def test_get_student_assignment_submission_list(self):
student = self.create_user("2020123123", "123")
Registration.objects.create(user_id=student.id, course_id=self.course_id)
self.submission_data["user_id"] = student.id
self.submission_data["username"] = student.username
Submission.objects.create(**self.submission_data)
problem_id = self.problem["_id"]
resp = self.client.get(f"{self.url}?assignment_id={self.assignment_id}&problem_id={problem_id}")
self.assertSuccess(resp)
class AssignmentSubmissionListProfessorTest(SubmissionPrepare):
def setUp(self):
self._create_assignment_submission()
self.url = self.reverse("assignment_submission_list_professor_api")
def test_get_assignment_submission_list_professor(self):
assignment_id = self.assignment_id
problem_id = self.problem["id"]
resp = self.client.get(f"{self.url}?assignment_id={assignment_id}&problem_id={problem_id}")
self.assertSuccess(resp)
class EditSubmissionScoreTest(SubmissionPrepare):
def setUp(self):
self._create_assignment_submission()
self.url = self.reverse("edit_submission_score_api")
def test_edit_submission_score(self):
submission_id = self.submission.id
data = {"id": submission_id, "score": 100}
resp = self.client.put(self.url, data=data)
self.assertSuccess(resp)
resp_data = resp.data["data"]
self.assertEqual(resp_data["statistic_info"]["score"], 100)
| en | 0.517872 | # todo contest submission | 2.623542 | 3 |
backend/app/app/db/init_db.py | god00/matching_card_game | 1 | 6612327 | <gh_stars>1-10
import app.db.base
from app.db.get_db_url import get_db_url
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
SQLALCHEMY_DATABASE_URL = get_db_url()
engine = create_engine(SQLALCHEMY_DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
| import app.db.base
from app.db.get_db_url import get_db_url
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
SQLALCHEMY_DATABASE_URL = get_db_url()
engine = create_engine(SQLALCHEMY_DATABASE_URL)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) | none | 1 | 2.262943 | 2 | |
conftest_source_nox.py | openSUSE/salt-toaster | 25 | 6612328 | <gh_stars>10-100
"""
:codeauthor: <NAME> (<EMAIL>)
tests.conftest
~~~~~~~~~~~~~~
Prepare py.test for our test suite
"""
# pylint: disable=wrong-import-order,wrong-import-position,3rd-party-local-module-not-gated
# pylint: disable=redefined-outer-name,invalid-name,3rd-party-module-not-gated
import logging
import os
import pathlib
import pprint
import re
import shutil
import ssl
import stat
import sys
import textwrap
from functools import partial, wraps
from unittest import TestCase # pylint: disable=blacklisted-module
import _pytest.logging
import _pytest.skipping
import psutil
import pytest
import salt._logging.impl
import salt.config
import salt.loader
import salt.log.mixins
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.win_functions
import salt.version
import saltfactories.utils.compat
from salt.serializers import yaml
from salt.utils.immutabletypes import freeze
from tests.support.helpers import (
PRE_PYTEST_SKIP_OR_NOT,
PRE_PYTEST_SKIP_REASON,
Webserver,
get_virtualenv_binary_path,
)
from tests.support.pytest.helpers import * # pylint: disable=unused-wildcard-import
from tests.support.runtests import RUNTIME_VARS
from tests.support.sminion import check_required_sminion_attributes, create_sminion
#
# Toaster specifics
import glob
from fnmatch import fnmatch
TESTS_DIR = pathlib.Path.cwd() / "tests"
PYTESTS_DIR = TESTS_DIR / "pytests"
CODE_DIR = TESTS_DIR.parent
# Change to code checkout directory
os.chdir(str(CODE_DIR))
# Make sure the current directory is the first item in sys.path
if str(CODE_DIR) in sys.path:
sys.path.remove(str(CODE_DIR))
sys.path.insert(0, str(CODE_DIR))
# Coverage
if "COVERAGE_PROCESS_START" in os.environ:
MAYBE_RUN_COVERAGE = True
COVERAGERC_FILE = os.environ["COVERAGE_PROCESS_START"]
else:
COVERAGERC_FILE = str(CODE_DIR / ".coveragerc")
MAYBE_RUN_COVERAGE = (
sys.argv[0].endswith("pytest.py") or "_COVERAGE_RCFILE" in os.environ
)
if MAYBE_RUN_COVERAGE:
# Flag coverage to track suprocesses by pointing it to the right .coveragerc file
os.environ["COVERAGE_PROCESS_START"] = str(COVERAGERC_FILE)
# Define the pytest plugins we rely on
pytest_plugins = ["tempdir", "helpers_namespace"]
# Define where not to collect tests from
collect_ignore = ["setup.py"]
# Patch PyTest logging handlers
class LogCaptureHandler(
salt.log.mixins.ExcInfoOnLogLevelFormatMixIn, _pytest.logging.LogCaptureHandler
):
"""
Subclassing PyTest's LogCaptureHandler in order to add the
exc_info_on_loglevel functionality and actually make it a NullHandler,
it's only used to print log messages emmited during tests, which we
have explicitly disabled in pytest.ini
"""
_pytest.logging.LogCaptureHandler = LogCaptureHandler
class LiveLoggingStreamHandler(
salt.log.mixins.ExcInfoOnLogLevelFormatMixIn,
_pytest.logging._LiveLoggingStreamHandler,
):
"""
Subclassing PyTest's LiveLoggingStreamHandler in order to add the
exc_info_on_loglevel functionality.
"""
_pytest.logging._LiveLoggingStreamHandler = LiveLoggingStreamHandler
# Reset logging root handlers
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
# Reset the root logger to its default level(because salt changed it)
logging.root.setLevel(logging.WARNING)
log = logging.getLogger("salt.testsuite")
# ----- PyTest Tempdir Plugin Hooks --------------------------------------------------------------------------------->
def pytest_tempdir_basename():
"""
Return the temporary directory basename for the salt test suite.
"""
return "salt-tests-tmpdir"
# <---- PyTest Tempdir Plugin Hooks ----------------------------------------------------------------------------------
# ----- CLI Options Setup ------------------------------------------------------------------------------------------->
def pytest_addoption(parser):
"""
register argparse-style options and ini-style config values.
"""
test_selection_group = parser.getgroup("Tests Selection")
test_selection_group.addoption(
"--from-filenames",
default=None,
help=(
"Pass a comma-separated list of file paths, and any test module which corresponds to the "
"specified file(s) will run. For example, if 'setup.py' was passed, then the corresponding "
"test files defined in 'tests/filename_map.yml' would run. Absolute paths are assumed to be "
"files containing relative paths, one per line. Providing the paths in a file can help get "
"around shell character limits when the list of files is long."
),
)
# Add deprecated CLI flag until we completely switch to PyTest
test_selection_group.addoption(
"--names-file", default=None, help="Deprecated option"
)
test_selection_group.addoption(
"--transport",
default="zeromq",
choices=("zeromq", "tcp"),
help=(
"Select which transport to run the integration tests with, zeromq or tcp. Default: %(default)s"
),
)
test_selection_group.addoption(
"--ssh",
"--ssh-tests",
dest="ssh",
action="store_true",
default=False,
help="Run salt-ssh tests. These tests will spin up a temporary "
"SSH server on your machine. In certain environments, this "
"may be insecure! Default: False",
)
test_selection_group.addoption(
"--proxy",
"--proxy-tests",
dest="proxy",
action="store_true",
default=False,
help="Run proxy tests",
)
test_selection_group.addoption(
"--run-slow", action="store_true", default=False, help="Run slow tests.",
)
output_options_group = parser.getgroup("Output Options")
output_options_group.addoption(
"--output-columns",
default=80,
type=int,
help="Number of maximum columns to use on the output",
)
output_options_group.addoption(
"--no-colors",
"--no-colours",
default=False,
action="store_true",
help="Disable colour printing.",
)
# ----- Test Groups --------------------------------------------------------------------------------------------->
# This will allow running the tests in chunks
test_selection_group.addoption(
"--test-group-count",
dest="test-group-count",
type=int,
help="The number of groups to split the tests into",
)
test_selection_group.addoption(
"--test-group",
dest="test-group",
type=int,
help="The group of tests that should be executed",
)
# <---- Test Groups ----------------------------------------------------------------------------------------------
# Toaster specific
parser.addini("tests_type", help="Type of the tests being run", default='unit')
# <---- CLI Options Setup --------------------------------------------------------------------------------------------
# ----- Register Markers -------------------------------------------------------------------------------------------->
@pytest.mark.trylast
def pytest_configure(config):
"""
called after command line options have been parsed
and all plugins and initial conftest files been loaded.
"""
for dirname in CODE_DIR.iterdir():
if not dirname.is_dir():
continue
if dirname != TESTS_DIR:
config.addinivalue_line("norecursedirs", str(CODE_DIR / dirname))
# Expose the markers we use to pytest CLI
config.addinivalue_line(
"markers",
"requires_salt_modules(*required_module_names): Skip if at least one module is not available.",
)
config.addinivalue_line(
"markers",
"requires_salt_states(*required_state_names): Skip if at least one state module is not available.",
)
config.addinivalue_line(
"markers", "windows_whitelisted: Mark test as whitelisted to run under Windows"
)
config.addinivalue_line(
"markers", "requires_sshd_server: Mark test that require an SSH server running"
)
# Make sure the test suite "knows" this is a pytest test run
RUNTIME_VARS.PYTEST_SESSION = True
# "Flag" the slotTest decorator if we're skipping slow tests or not
os.environ["SLOW_TESTS"] = str(config.getoption("--run-slow"))
# Toaster specific
config.salt_version = salt.version.__version__
config.xfail_list = get_list(config, 'xfail_list')
config.ignore_list = get_list(config, 'ignore_list')
# <---- Register Markers ---------------------------------------------------------------------------------------------
# ----- PyTest Tweaks ----------------------------------------------------------------------------------------------->
def set_max_open_files_limits(min_soft=3072, min_hard=4096):
# Get current limits
if salt.utils.platform.is_windows():
import win32file
prev_hard = win32file._getmaxstdio()
prev_soft = 512
else:
import resource
prev_soft, prev_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
# Check minimum required limits
set_limits = False
if prev_soft < min_soft:
soft = min_soft
set_limits = True
else:
soft = prev_soft
if prev_hard < min_hard:
hard = min_hard
set_limits = True
else:
hard = prev_hard
# Increase limits
if set_limits:
log.debug(
" * Max open files settings is too low (soft: %s, hard: %s) for running the tests. "
"Trying to raise the limits to soft: %s, hard: %s",
prev_soft,
prev_hard,
soft,
hard,
)
try:
if salt.utils.platform.is_windows():
hard = 2048 if hard > 2048 else hard
win32file._setmaxstdio(hard)
else:
resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
except Exception as err: # pylint: disable=broad-except
log.error(
"Failed to raise the max open files settings -> %s. Please issue the following command "
"on your console: 'ulimit -u %s'",
err,
soft,
)
exit(1)
return soft, hard
def pytest_report_header():
soft, hard = set_max_open_files_limits()
return "max open files; soft: {}; hard: {}".format(soft, hard)
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_collection_modifyitems(config, items):
"""
called after collection has been performed, may filter or re-order
the items in-place.
:param _pytest.main.Session session: the pytest session object
:param _pytest.config.Config config: pytest config object
:param List[_pytest.nodes.Item] items: list of item objects
"""
# Let PyTest or other plugins handle the initial collection
yield
groups_collection_modifyitems(config, items)
from_filenames_collection_modifyitems(config, items)
log.warning("Mofifying collected tests to keep track of fixture usage")
for item in items:
for fixture in item.fixturenames:
if fixture not in item._fixtureinfo.name2fixturedefs:
continue
for fixturedef in item._fixtureinfo.name2fixturedefs[fixture]:
if fixturedef.scope != "package":
continue
try:
fixturedef.finish.__wrapped__
except AttributeError:
original_func = fixturedef.finish
def wrapper(func, fixturedef):
@wraps(func)
def wrapped(self, request, nextitem=False):
try:
return self._finished
except AttributeError:
if nextitem:
fpath = pathlib.Path(self.baseid).resolve()
tpath = pathlib.Path(
nextitem.fspath.strpath
).resolve()
try:
tpath.relative_to(fpath)
# The test module is within the same package that the fixture is
if (
not request.session.shouldfail
and not request.session.shouldstop
):
log.debug(
"The next test item is still under the fixture package path. "
"Not terminating %s",
self,
)
return
except ValueError:
pass
log.debug("Finish called on %s", self)
try:
return func(request)
except BaseException as exc: # pylint: disable=broad-except
pytest.fail(
"Failed to run finish() on {}: {}".format(
fixturedef, exc
),
pytrace=True,
)
finally:
self._finished = True
return partial(wrapped, fixturedef)
fixturedef.finish = wrapper(fixturedef.finish, fixturedef)
try:
fixturedef.finish.__wrapped__
except AttributeError:
fixturedef.finish.__wrapped__ = original_func
@pytest.hookimpl(trylast=True, hookwrapper=True)
def pytest_runtest_protocol(item, nextitem):
"""
implements the runtest_setup/call/teardown protocol for
the given test item, including capturing exceptions and calling
reporting hooks.
:arg item: test item for which the runtest protocol is performed.
:arg nextitem: the scheduled-to-be-next test item (or None if this
is the end my friend). This argument is passed on to
:py:func:`pytest_runtest_teardown`.
:return boolean: True if no further hook implementations should be invoked.
Stops at first non-None result, see :ref:`firstresult`
"""
request = item._request
used_fixture_defs = []
for fixture in item.fixturenames:
if fixture not in item._fixtureinfo.name2fixturedefs:
continue
for fixturedef in reversed(item._fixtureinfo.name2fixturedefs[fixture]):
if fixturedef.scope != "package":
continue
used_fixture_defs.append(fixturedef)
try:
# Run the test
yield
finally:
for fixturedef in used_fixture_defs:
fixturedef.finish(request, nextitem=nextitem)
del request
del used_fixture_defs
# <---- PyTest Tweaks ------------------------------------------------------------------------------------------------
# ----- Test Setup -------------------------------------------------------------------------------------------------->
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    """
    Fixtures injection based on markers or test skips based on CLI arguments
    """
    # Skip everything under tests/integration/utils when the pre-computed
    # skip flag was set (PRE_PYTEST_SKIP_OR_NOT is a module-level constant).
    integration_utils_tests_path = str(TESTS_DIR / "integration" / "utils")
    if (
        str(item.fspath).startswith(integration_utils_tests_path)
        and PRE_PYTEST_SKIP_OR_NOT is True
    ):
        item._skipped_by_mark = True
        pytest.skip(PRE_PYTEST_SKIP_REASON)
    # Tests flagged as slow only run when --run-slow was passed.
    if saltfactories.utils.compat.has_unittest_attr(item, "__slow_test__"):
        if item.config.getoption("--run-slow") is False:
            item._skipped_by_mark = True
            pytest.skip("Slow tests are disabled!")
    # @pytest.mark.requires_sshd_server: gate on --ssh-tests and inject the
    # sshd_server fixture into the item's fixture list.
    requires_sshd_server_marker = item.get_closest_marker("requires_sshd_server")
    if requires_sshd_server_marker is not None:
        if not item.config.getoption("--ssh-tests"):
            item._skipped_by_mark = True
            pytest.skip("SSH tests are disabled, pass '--ssh-tests' to enable them.")
        item.fixturenames.append("sshd_server")
    # @pytest.mark.requires_salt_modules: skip unless every named execution
    # module is available on the scanning minion. The marker accepts either
    # several string args or a single list/tuple/set of names.
    requires_salt_modules_marker = item.get_closest_marker("requires_salt_modules")
    if requires_salt_modules_marker is not None:
        required_salt_modules = requires_salt_modules_marker.args
        if len(required_salt_modules) == 1 and isinstance(
            required_salt_modules[0], (list, tuple, set)
        ):
            required_salt_modules = required_salt_modules[0]
        required_salt_modules = set(required_salt_modules)
        not_available_modules = check_required_sminion_attributes(
            "functions", required_salt_modules
        )
        if not_available_modules:
            item._skipped_by_mark = True
            if len(not_available_modules) == 1:
                pytest.skip(
                    "Salt module '{}' is not available".format(*not_available_modules)
                )
            pytest.skip(
                "Salt modules not available: {}".format(
                    ", ".join(not_available_modules)
                )
            )
    # @pytest.mark.requires_salt_states: same scheme, but for state modules.
    requires_salt_states_marker = item.get_closest_marker("requires_salt_states")
    if requires_salt_states_marker is not None:
        required_salt_states = requires_salt_states_marker.args
        if len(required_salt_states) == 1 and isinstance(
            required_salt_states[0], (list, tuple, set)
        ):
            required_salt_states = required_salt_states[0]
        required_salt_states = set(required_salt_states)
        not_available_states = check_required_sminion_attributes(
            "states", required_salt_states
        )
        if not_available_states:
            item._skipped_by_mark = True
            if len(not_available_states) == 1:
                pytest.skip(
                    "Salt state module '{}' is not available".format(
                        *not_available_states
                    )
                )
            pytest.skip(
                "Salt state modules not available: {}".format(
                    ", ".join(not_available_states)
                )
            )
    # On Windows only unit tests and explicitly whitelisted tests run.
    if salt.utils.platform.is_windows():
        unit_tests_paths = (
            str(TESTS_DIR / "unit"),
            str(PYTESTS_DIR / "unit"),
        )
        # str.startswith accepts a tuple of prefixes here.
        if not str(pathlib.Path(item.fspath).resolve()).startswith(unit_tests_paths):
            # Unit tests are whitelisted on windows by default, so, we're only
            # after all other tests
            windows_whitelisted_marker = item.get_closest_marker("windows_whitelisted")
            if windows_whitelisted_marker is None:
                item._skipped_by_mark = True
                pytest.skip("Test is not whitelisted for Windows")
# <---- Test Setup ---------------------------------------------------------------------------------------------------
# ----- Test Groups Selection --------------------------------------------------------------------------------------->
def get_group_size_and_start(total_items, total_groups, group_id):
    """
    Return a ``(start, size)`` tuple describing the slice of *total_items*
    assigned to 1-based group *group_id* out of *total_groups*.

    Items are distributed as evenly as possible; the first
    ``total_items % total_groups`` groups each receive one extra item.
    """
    quotient, remainder = divmod(total_items, total_groups)
    preceding_groups = group_id - 1
    # Each preceding group holds `quotient` items, plus one extra for every
    # preceding group that absorbed part of the remainder.
    start = quotient * preceding_groups + min(preceding_groups, remainder)
    if group_id <= remainder:
        size = quotient + 1
    else:
        size = quotient
    return (start, size)
def get_group(items, total_groups, group_id):
    """
    Split *items* into *total_groups* chunks and return the pair
    ``(selected, deselected)`` for 1-based group *group_id*.
    """
    if not 0 < group_id <= total_groups:
        raise ValueError("Invalid test-group argument")
    start, size = get_group_size_and_start(len(items), total_groups, group_id)
    end = start + size
    selected = items[start:end]
    deselected = items[:start] + items[end:]
    # Sanity check: the split must account for every single item.
    assert len(selected) + len(deselected) == len(items)
    return selected, deselected
def groups_collection_modifyitems(config, items):
    """
    Keep only the collected test items belonging to the requested test group.

    Reads the ``test-group-count`` / ``test-group`` options; when both are
    set, replaces ``items`` in place with the selected group's items and
    reports the rest as deselected.
    """
    group_count = config.getoption("test-group-count")
    group_id = config.getoption("test-group")
    if not group_count or not group_id:
        # We're not selecting tests using groups, don't do any filtering
        return
    # (dropped an unused `total_items = len(items)` local from the original)
    tests_in_group, deselected = get_group(items, group_count, group_id)
    # Replace all items in the list
    items[:] = tests_in_group
    if deselected:
        config.hook.pytest_deselected(items=deselected)
    terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
    terminal_reporter.write(
        "Running test group #{} ({} tests)\n".format(group_id, len(items)), yellow=True,
    )
# <---- Test Groups Selection ----------------------------------------------------------------------------------------
# ----- Fixtures Overrides ------------------------------------------------------------------------------------------>
@pytest.fixture(scope="session")
def salt_factories_config():
    """
    Return a dictionary with the keyword arguments for FactoriesManager
    """
    # Allow a longer daemon start timeout on CI systems, which are slower.
    if os.environ.get("JENKINS_URL") or os.environ.get("CI"):
        start_timeout = 120
    else:
        start_timeout = 60
    return {
        "code_dir": str(CODE_DIR),
        "inject_coverage": MAYBE_RUN_COVERAGE,
        "inject_sitecustomize": MAYBE_RUN_COVERAGE,
        "start_timeout": start_timeout,
    }
# <---- Fixtures Overrides -------------------------------------------------------------------------------------------
# ----- Salt Factories ---------------------------------------------------------------------------------------------->
@pytest.fixture(scope="session")
def integration_files_dir(salt_factories):
    """
    Fixture which returns the salt integration files directory path.
    Creates the directory if it does not yet exist.
    """
    target_dir = salt_factories.root_dir / "integration-files"
    target_dir.mkdir(exist_ok=True)
    # Populate it with the static integration files shipped with the test suite.
    source_dir = PYTESTS_DIR / "integration" / "files"
    for entry in source_dir.iterdir():
        destination = target_dir / entry.name
        if entry.is_dir():
            shutil.copytree(str(entry), str(destination))
        else:
            shutil.copyfile(str(entry), str(destination))
    return target_dir
@pytest.fixture(scope="session")
def state_tree_root_dir(integration_files_dir):
    """
    Fixture which returns the salt state tree root directory path.
    Creates the directory if it does not yet exist.
    """
    state_tree = integration_files_dir / "state-tree"
    state_tree.mkdir(exist_ok=True)
    return state_tree
@pytest.fixture(scope="session")
def pillar_tree_root_dir(integration_files_dir):
    """
    Fixture which returns the salt pillar tree root directory path.
    Creates the directory if it does not yet exist.
    """
    pillar_tree = integration_files_dir / "pillar-tree"
    pillar_tree.mkdir(exist_ok=True)
    return pillar_tree
@pytest.fixture(scope="session")
def base_env_state_tree_root_dir(state_tree_root_dir):
    """
    Fixture which returns the salt base environment state tree directory path.
    Creates the directory if it does not yet exist.
    """
    base_tree = state_tree_root_dir / "base"
    base_tree.mkdir(exist_ok=True)
    # Mirror the path into RUNTIME_VARS so the unittest-based suite sees it too.
    RUNTIME_VARS.TMP_STATE_TREE = str(base_tree.resolve())
    RUNTIME_VARS.TMP_BASEENV_STATE_TREE = RUNTIME_VARS.TMP_STATE_TREE
    return base_tree
@pytest.fixture(scope="session")
def prod_env_state_tree_root_dir(state_tree_root_dir):
    """
    Fixture which returns the salt prod environment state tree directory path.
    Creates the directory if it does not yet exist.
    """
    prod_tree = state_tree_root_dir / "prod"
    prod_tree.mkdir(exist_ok=True)
    # Mirror the path into RUNTIME_VARS so the unittest-based suite sees it too.
    RUNTIME_VARS.TMP_PRODENV_STATE_TREE = str(prod_tree.resolve())
    return prod_tree
@pytest.fixture(scope="session")
def base_env_pillar_tree_root_dir(pillar_tree_root_dir):
    """
    Fixture which returns the salt base environment pillar tree directory path.
    Creates the directory if it does not yet exist.
    """
    base_pillar = pillar_tree_root_dir / "base"
    base_pillar.mkdir(exist_ok=True)
    # Mirror the path into RUNTIME_VARS so the unittest-based suite sees it too.
    RUNTIME_VARS.TMP_PILLAR_TREE = str(base_pillar.resolve())
    RUNTIME_VARS.TMP_BASEENV_PILLAR_TREE = RUNTIME_VARS.TMP_PILLAR_TREE
    return base_pillar
@pytest.fixture(scope="session")
def prod_env_pillar_tree_root_dir(pillar_tree_root_dir):
    """
    Fixture which returns the salt prod environment pillar tree directory path.
    Creates the directory if it does not yet exist.
    """
    prod_pillar = pillar_tree_root_dir / "prod"
    prod_pillar.mkdir(exist_ok=True)
    # Mirror the path into RUNTIME_VARS so the unittest-based suite sees it too.
    RUNTIME_VARS.TMP_PRODENV_PILLAR_TREE = str(prod_pillar.resolve())
    return prod_pillar
@pytest.fixture(scope="session")
def salt_syndic_master_factory(
    request,
    salt_factories,
    base_env_state_tree_root_dir,
    base_env_pillar_tree_root_dir,
    prod_env_state_tree_root_dir,
    prod_env_pillar_tree_root_dir,
):
    """
    Session-scoped factory for the "syndic_master" salt-master daemon
    (``order_masters=True``), configured from the shipped ``syndic_master``
    config file plus test-suite specific overrides (file/pillar roots,
    ext_pillar, extension modules, autosign file).
    """
    root_dir = salt_factories.get_root_dir_for_daemon("syndic_master")
    conf_dir = root_dir / "conf"
    conf_dir.mkdir(exist_ok=True)
    # Start from the static syndic_master config shipped with the test suite.
    with salt.utils.files.fopen(
        os.path.join(RUNTIME_VARS.CONF_DIR, "syndic_master")
    ) as rfh:
        config_defaults = yaml.deserialize(rfh.read())
    # Fresh, empty known_hosts file for salt-ssh related tests.
    tests_known_hosts_file = str(root_dir / "salt_ssh_known_hosts")
    with salt.utils.files.fopen(tests_known_hosts_file, "w") as known_hosts:
        known_hosts.write("")
    config_defaults["root_dir"] = str(root_dir)
    config_defaults["known_hosts_file"] = tests_known_hosts_file
    config_defaults["syndic_master"] = "localhost"
    config_defaults["transport"] = request.config.getoption("--transport")
    config_overrides = {}
    # cmd_yaml external pillar: only the "print a file" command differs per OS.
    ext_pillar = []
    if salt.utils.platform.is_windows():
        ext_pillar.append(
            {"cmd_yaml": "type {}".format(os.path.join(RUNTIME_VARS.FILES, "ext.yaml"))}
        )
    else:
        ext_pillar.append(
            {"cmd_yaml": "cat {}".format(os.path.join(RUNTIME_VARS.FILES, "ext.yaml"))}
        )
    # We need to copy the extension modules into the new master root_dir or
    # it will be prefixed by it
    extension_modules_path = str(root_dir / "extension_modules")
    if not os.path.exists(extension_modules_path):
        shutil.copytree(
            os.path.join(RUNTIME_VARS.FILES, "extension_modules"),
            extension_modules_path,
        )
    # Copy the autosign_file to the new master root_dir
    autosign_file_path = str(root_dir / "autosign_file")
    shutil.copyfile(
        os.path.join(RUNTIME_VARS.FILES, "autosign_file"), autosign_file_path
    )
    # all read, only owner write
    autosign_file_permissions = (
        stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
    )
    os.chmod(autosign_file_path, autosign_file_permissions)
    config_overrides.update(
        {
            "ext_pillar": ext_pillar,
            "extension_modules": extension_modules_path,
            "file_roots": {
                "base": [
                    str(base_env_state_tree_root_dir),
                    os.path.join(RUNTIME_VARS.FILES, "file", "base"),
                ],
                # Alternate root to test __env__ choices
                "prod": [
                    str(prod_env_state_tree_root_dir),
                    os.path.join(RUNTIME_VARS.FILES, "file", "prod"),
                ],
            },
            "pillar_roots": {
                "base": [
                    str(base_env_pillar_tree_root_dir),
                    os.path.join(RUNTIME_VARS.FILES, "pillar", "base"),
                ],
                "prod": [str(prod_env_pillar_tree_root_dir)],
            },
        }
    )
    factory = salt_factories.get_salt_master_daemon(
        "syndic_master",
        order_masters=True,
        config_defaults=config_defaults,
        config_overrides=config_overrides,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    return factory
@pytest.fixture(scope="session")
def salt_syndic_factory(salt_factories, salt_syndic_master_factory):
    """
    Session-scoped salt-syndic daemon factory attached to the syndic master.
    """
    # Load the static syndic config shipped with the test suite and adjust it.
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.CONF_DIR, "syndic")) as rfh:
        syndic_opts = yaml.deserialize(rfh.read())
    syndic_opts["hosts.file"] = os.path.join(RUNTIME_VARS.TMP, "hosts")
    syndic_opts["aliases.file"] = os.path.join(RUNTIME_VARS.TMP, "aliases")
    syndic_opts["transport"] = salt_syndic_master_factory.config["transport"]
    # Only the syndic section carries options; master/minion stay None.
    config_defaults = {"master": None, "minion": None, "syndic": syndic_opts}
    return salt_syndic_master_factory.get_salt_syndic_daemon(
        "syndic",
        config_defaults=config_defaults,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
@pytest.fixture(scope="session")
def salt_master_factory(
    salt_factories,
    salt_syndic_master_factory,
    base_env_state_tree_root_dir,
    base_env_pillar_tree_root_dir,
    prod_env_state_tree_root_dir,
    prod_env_pillar_tree_root_dir,
):
    """
    Session-scoped factory for the main "master" salt-master daemon,
    registered under the syndic master, configured from the shipped
    ``master`` config file plus test-suite specific overrides (reactor,
    ext_pillar, file/pillar roots, autosign file, cloud configs).
    """
    root_dir = salt_factories.get_root_dir_for_daemon("master")
    conf_dir = root_dir / "conf"
    conf_dir.mkdir(exist_ok=True)
    # Start from the static master config shipped with the test suite.
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.CONF_DIR, "master")) as rfh:
        config_defaults = yaml.deserialize(rfh.read())
    # Fresh, empty known_hosts file for salt-ssh related tests.
    tests_known_hosts_file = str(root_dir / "salt_ssh_known_hosts")
    with salt.utils.files.fopen(tests_known_hosts_file, "w") as known_hosts:
        known_hosts.write("")
    config_defaults["root_dir"] = str(root_dir)
    config_defaults["known_hosts_file"] = tests_known_hosts_file
    config_defaults["syndic_master"] = "localhost"
    config_defaults["transport"] = salt_syndic_master_factory.config["transport"]
    # Reactor wiring exercised by the reactor integration tests.
    config_defaults["reactor"] = [
        {"salt/test/reactor": [os.path.join(RUNTIME_VARS.FILES, "reactor-test.sls")]}
    ]
    config_overrides = {}
    # cmd_yaml external pillar: only the "print a file" command differs per OS.
    ext_pillar = []
    if salt.utils.platform.is_windows():
        ext_pillar.append(
            {"cmd_yaml": "type {}".format(os.path.join(RUNTIME_VARS.FILES, "ext.yaml"))}
        )
    else:
        ext_pillar.append(
            {"cmd_yaml": "cat {}".format(os.path.join(RUNTIME_VARS.FILES, "ext.yaml"))}
        )
    # file_tree external pillar used by the file_tree pillar tests.
    ext_pillar.append(
        {
            "file_tree": {
                "root_dir": os.path.join(RUNTIME_VARS.PILLAR_DIR, "base", "file_tree"),
                "follow_dir_links": False,
                "keep_newline": True,
            }
        }
    )
    config_overrides["pillar_opts"] = True
    # We need to copy the extension modules into the new master root_dir or
    # it will be prefixed by it
    extension_modules_path = str(root_dir / "extension_modules")
    if not os.path.exists(extension_modules_path):
        shutil.copytree(
            os.path.join(RUNTIME_VARS.FILES, "extension_modules"),
            extension_modules_path,
        )
    # Copy the autosign_file to the new master root_dir
    autosign_file_path = str(root_dir / "autosign_file")
    shutil.copyfile(
        os.path.join(RUNTIME_VARS.FILES, "autosign_file"), autosign_file_path
    )
    # all read, only owner write
    autosign_file_permissions = (
        stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
    )
    os.chmod(autosign_file_path, autosign_file_permissions)
    config_overrides.update(
        {
            "ext_pillar": ext_pillar,
            "extension_modules": extension_modules_path,
            "file_roots": {
                "base": [
                    str(base_env_state_tree_root_dir),
                    os.path.join(RUNTIME_VARS.FILES, "file", "base"),
                ],
                # Alternate root to test __env__ choices
                "prod": [
                    str(prod_env_state_tree_root_dir),
                    os.path.join(RUNTIME_VARS.FILES, "file", "prod"),
                ],
            },
            "pillar_roots": {
                "base": [
                    str(base_env_pillar_tree_root_dir),
                    os.path.join(RUNTIME_VARS.FILES, "pillar", "base"),
                ],
                "prod": [str(prod_env_pillar_tree_root_dir)],
            },
        }
    )
    # Let's copy over the test cloud config files and directories into the running master config directory
    for entry in os.listdir(RUNTIME_VARS.CONF_DIR):
        if not entry.startswith("cloud"):
            continue
        source = os.path.join(RUNTIME_VARS.CONF_DIR, entry)
        dest = str(conf_dir / entry)
        if os.path.isdir(source):
            shutil.copytree(source, dest)
        else:
            shutil.copyfile(source, dest)
    factory = salt_syndic_master_factory.get_salt_master_daemon(
        "master",
        config_defaults=config_defaults,
        config_overrides=config_overrides,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    return factory
@pytest.fixture(scope="session")
def salt_minion_factory(salt_master_factory):
    """
    Session-scoped salt-minion daemon factory attached to the session master.
    """
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.CONF_DIR, "minion")) as rfh:
        config_defaults = yaml.deserialize(rfh.read())
    # Point the minion at the test-suite controlled hosts/aliases files and
    # reuse the master's transport so both daemons speak the same protocol.
    config_defaults.update(
        {
            "hosts.file": os.path.join(RUNTIME_VARS.TMP, "hosts"),
            "aliases.file": os.path.join(RUNTIME_VARS.TMP, "aliases"),
            "transport": salt_master_factory.config["transport"],
        }
    )
    config_overrides = {
        "file_roots": salt_master_factory.config["file_roots"].copy(),
        "pillar_roots": salt_master_factory.config["pillar_roots"].copy(),
    }
    virtualenv_binary = get_virtualenv_binary_path()
    if virtualenv_binary:
        config_overrides["venv_bin"] = virtualenv_binary
    factory = salt_master_factory.get_salt_minion_daemon(
        "minion",
        config_defaults=config_defaults,
        config_overrides=config_overrides,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    # Drop the minion's key from the master once the daemon terminates.
    factory.register_after_terminate_callback(
        pytest.helpers.remove_stale_minion_key, salt_master_factory, factory.id
    )
    return factory
@pytest.fixture(scope="session")
def salt_sub_minion_factory(salt_master_factory):
    """
    Session-scoped secondary ("sub_minion") salt-minion daemon factory.
    """
    with salt.utils.files.fopen(
        os.path.join(RUNTIME_VARS.CONF_DIR, "sub_minion")
    ) as rfh:
        defaults = yaml.deserialize(rfh.read())
    # Test-suite controlled hosts/aliases files; share the master's transport.
    defaults["hosts.file"] = os.path.join(RUNTIME_VARS.TMP, "hosts")
    defaults["aliases.file"] = os.path.join(RUNTIME_VARS.TMP, "aliases")
    defaults["transport"] = salt_master_factory.config["transport"]
    overrides = {
        "file_roots": salt_master_factory.config["file_roots"].copy(),
        "pillar_roots": salt_master_factory.config["pillar_roots"].copy(),
    }
    venv_bin = get_virtualenv_binary_path()
    if venv_bin:
        overrides["venv_bin"] = venv_bin
    factory = salt_master_factory.get_salt_minion_daemon(
        "sub_minion",
        config_defaults=defaults,
        config_overrides=overrides,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    # Drop the minion's key from the master once the daemon terminates.
    factory.register_after_terminate_callback(
        pytest.helpers.remove_stale_minion_key, salt_master_factory, factory.id
    )
    return factory
@pytest.fixture(scope="session")
def salt_proxy_factory(salt_factories, salt_master_factory):
    """
    Session-scoped salt-proxy minion daemon factory (id "proxytest").
    """
    proxy_minion_id = "proxytest"
    root_dir = salt_factories.get_root_dir_for_daemon(proxy_minion_id)
    conf_dir = root_dir / "conf"
    conf_dir.mkdir(parents=True, exist_ok=True)
    # Expose the proxy conf dir to the unittest-based suite.
    RUNTIME_VARS.TMP_PROXY_CONF_DIR = str(conf_dir)
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.CONF_DIR, "proxy")) as rfh:
        config_defaults = yaml.deserialize(rfh.read())
    config_defaults.update(
        {
            "root_dir": str(root_dir),
            "hosts.file": os.path.join(RUNTIME_VARS.TMP, "hosts"),
            "aliases.file": os.path.join(RUNTIME_VARS.TMP, "aliases"),
            "transport": salt_master_factory.config["transport"],
        }
    )
    factory = salt_master_factory.get_salt_proxy_minion_daemon(
        proxy_minion_id,
        config_defaults=config_defaults,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    # Drop the proxy minion's key from the master once the daemon terminates.
    factory.register_after_terminate_callback(
        pytest.helpers.remove_stale_minion_key, salt_master_factory, factory.id
    )
    return factory
@pytest.fixture(scope="session")
def salt_cli(salt_master_factory):
    """Return a ``salt`` CLI wrapper built from the session master factory."""
    cli = salt_master_factory.get_salt_cli()
    return cli
@pytest.fixture(scope="session")
def salt_cp_cli(salt_master_factory):
    """Return a ``salt-cp`` CLI wrapper built from the session master factory."""
    cli = salt_master_factory.get_salt_cp_cli()
    return cli
@pytest.fixture(scope="session")
def salt_key_cli(salt_master_factory):
    """Return a ``salt-key`` CLI wrapper built from the session master factory."""
    cli = salt_master_factory.get_salt_key_cli()
    return cli
@pytest.fixture(scope="session")
def salt_run_cli(salt_master_factory):
    """Return a ``salt-run`` CLI wrapper built from the session master factory."""
    cli = salt_master_factory.get_salt_run_cli()
    return cli
@pytest.fixture(scope="session")
def salt_ssh_cli(salt_master_factory):
    """Return a ``salt-ssh`` CLI wrapper built from the session master factory."""
    cli = salt_master_factory.get_salt_ssh_cli()
    return cli
@pytest.fixture(scope="session")
def salt_call_cli(salt_minion_factory):
    """Return a ``salt-call`` CLI wrapper built from the session minion factory."""
    cli = salt_minion_factory.get_salt_call_cli()
    return cli
@pytest.fixture(scope="session", autouse=True)
def bridge_pytest_and_runtests(
    reap_stray_processes,
    salt_factories,
    salt_syndic_master_factory,
    salt_syndic_factory,
    salt_master_factory,
    salt_minion_factory,
    salt_sub_minion_factory,
    sshd_config_dir,
):
    """
    Autouse session fixture that mirrors the pytest-generated daemon
    configurations and paths into ``RUNTIME_VARS`` so that the legacy
    unittest-based ("runtests") suite can consume them.
    """
    # Make sure unittest2 uses the pytest generated configuration
    RUNTIME_VARS.RUNTIME_CONFIGS["master"] = freeze(salt_master_factory.config)
    RUNTIME_VARS.RUNTIME_CONFIGS["minion"] = freeze(salt_minion_factory.config)
    RUNTIME_VARS.RUNTIME_CONFIGS["sub_minion"] = freeze(salt_sub_minion_factory.config)
    RUNTIME_VARS.RUNTIME_CONFIGS["syndic_master"] = freeze(
        salt_syndic_master_factory.config
    )
    RUNTIME_VARS.RUNTIME_CONFIGS["syndic"] = freeze(salt_syndic_factory.config)
    RUNTIME_VARS.RUNTIME_CONFIGS["client_config"] = freeze(
        salt.config.client_config(salt_master_factory.config["conf_file"])
    )
    # Make sure unittest2 classes know their paths
    RUNTIME_VARS.TMP_ROOT_DIR = str(salt_factories.root_dir.resolve())
    RUNTIME_VARS.TMP_CONF_DIR = os.path.dirname(salt_master_factory.config["conf_file"])
    RUNTIME_VARS.TMP_MINION_CONF_DIR = os.path.dirname(
        salt_minion_factory.config["conf_file"]
    )
    RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR = os.path.dirname(
        salt_sub_minion_factory.config["conf_file"]
    )
    RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR = os.path.dirname(
        salt_syndic_master_factory.config["conf_file"]
    )
    RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR = os.path.dirname(
        salt_syndic_factory.config["conf_file"]
    )
    RUNTIME_VARS.TMP_SSH_CONF_DIR = str(sshd_config_dir)
@pytest.fixture(scope="session")
def sshd_config_dir(salt_factories):
    """Yield the sshd daemon's config directory; removed on teardown."""
    sshd_root = salt_factories.get_root_dir_for_daemon("sshd")
    yield sshd_root
    shutil.rmtree(str(sshd_root), ignore_errors=True)
@pytest.fixture(scope="module")
def sshd_server(salt_factories, sshd_config_dir, salt_master):
    """
    Module-scoped fixture that starts a dedicated sshd daemon for salt-ssh
    tests and writes a matching salt-ssh roster file pointing at it.
    Yields the running sshd factory; the roster file is removed on teardown.
    """
    sshd_config_dict = {
        "Protocol": "2",
        # Turn strict modes off so that we can operate in /tmp
        "StrictModes": "no",
        # Logging
        "SyslogFacility": "AUTH",
        "LogLevel": "INFO",
        # Authentication:
        "LoginGraceTime": "120",
        "PermitRootLogin": "without-password",
        "PubkeyAuthentication": "yes",
        # Don't read the user's ~/.rhosts and ~/.shosts files
        "IgnoreRhosts": "yes",
        "HostbasedAuthentication": "no",
        # To enable empty passwords, change to yes (NOT RECOMMENDED)
        "PermitEmptyPasswords": "no",
        # Change to yes to enable challenge-response passwords (beware issues with
        # some PAM modules and threads)
        "ChallengeResponseAuthentication": "no",
        # Change to no to disable tunnelled clear text passwords
        "PasswordAuthentication": "no",
        "X11Forwarding": "no",
        "X11DisplayOffset": "10",
        "PrintMotd": "no",
        "PrintLastLog": "yes",
        "TCPKeepAlive": "yes",
        "AcceptEnv": "LANG LC_*",
        "Subsystem": "sftp /usr/lib/openssh/sftp-server",
        "UsePAM": "yes",
    }
    factory = salt_factories.get_sshd_daemon(
        sshd_config_dict=sshd_config_dict, config_dir=sshd_config_dir,
    )
    # We also need a salt-ssh roster config file
    roster_path = pathlib.Path(salt_master.config_dir) / "roster"
    roster_contents = textwrap.dedent(
        """\
        localhost:
          host: 127.0.0.1
          port: {}
          user: {}
          mine_functions:
            test.arg: ['itworked']
        """.format(
            factory.listen_port, RUNTIME_VARS.RUNNING_TESTS_USER
        )
    )
    if salt.utils.platform.is_darwin():
        roster_contents += "  set_path: $PATH:/usr/local/bin/\n"
    log.debug(
        "Writing to configuration file %s. Configuration:\n%s",
        roster_path,
        roster_contents,
    )
    with salt.utils.files.fopen(str(roster_path), "w") as wfh:
        wfh.write(roster_contents)
    with factory.started():
        yield factory
    if roster_path.exists():
        roster_path.unlink()
# <---- Salt Factories -----------------------------------------------------------------------------------------------
# ----- From Filenames Test Selection ------------------------------------------------------------------------------->
def _match_to_test_file(match):
    """
    Translate a dotted ``filename_map.yml`` match (e.g. ``unit.test_foo``)
    into a test module path relative to ``CODE_DIR``.
    """
    *packages, module = match.split(".")
    return TESTS_DIR.joinpath(*packages, module + ".py").relative_to(CODE_DIR)
def from_filenames_collection_modifyitems(config, items):
    """
    Select/deselect collected test items based on ``--from-filenames``.

    The option value is a comma-separated list of repository-relative paths
    (an absolute path is treated as a file containing one path per line).
    Each path is mapped to test modules directly (``tests/...`` entries and
    ``test_<name>.py`` siblings) and through ``tests/filename_map.yml``
    rules (the ``*`` catch-all, regex rules containing ``|``, glob rules,
    and literal path rules). Items not matched are deselected.

    Bug fix vs. the previous revision: the direct-matching branch used
    ``path`` — the leftover loop variable from the earlier CLI-parsing
    loop — instead of ``filename``, so ``__init__.py`` detection and the
    ``test_{name}`` lookup ran against the wrong file.
    """
    from_filenames = config.getoption("--from-filenames")
    if not from_filenames:
        # Don't do anything
        return

    test_categories_paths = (
        (TESTS_DIR / "integration").relative_to(CODE_DIR),
        (TESTS_DIR / "multimaster").relative_to(CODE_DIR),
        (TESTS_DIR / "unit").relative_to(CODE_DIR),
        (PYTESTS_DIR / "e2e").relative_to(CODE_DIR),
        (PYTESTS_DIR / "functional").relative_to(CODE_DIR),
        (PYTESTS_DIR / "integration").relative_to(CODE_DIR),
        (PYTESTS_DIR / "unit").relative_to(CODE_DIR),
    )

    test_module_paths = set()
    from_filenames_listing = set()
    for path in [pathlib.Path(path.strip()) for path in from_filenames.split(",")]:
        if path.is_absolute():
            # In this case, this path is considered to be a file containing a line separated list
            # of files to consider
            with salt.utils.files.fopen(str(path)) as rfh:
                for line in rfh:
                    line_path = pathlib.Path(line.strip())
                    if not line_path.exists():
                        continue
                    from_filenames_listing.add(line_path)
            continue
        from_filenames_listing.add(path)

    filename_map = yaml.deserialize((TESTS_DIR / "filename_map.yml").read_text())
    # Let's add the match all rule
    for rule, matches in filename_map.items():
        if rule == "*":
            for match in matches:
                test_module_paths.add(_match_to_test_file(match))
            break

    # Let's now go through the list of files gathered
    for filename in from_filenames_listing:
        if str(filename).startswith("tests/"):
            # Tests in the listing don't require additional matching and will be added to the
            # list of tests to run
            test_module_paths.add(filename)
            continue
        if filename.name == "setup.py" or str(filename).startswith("salt/"):
            if filename.name == "__init__.py":
                # No direct matching
                continue
            # Now let's try a direct match between the passed file and possible test modules
            for test_categories_path in test_categories_paths:
                test_module_path = test_categories_path / "test_{}".format(
                    filename.name
                )
                if test_module_path.is_file():
                    test_module_paths.add(test_module_path)
                    continue
            # Do we have an entry in tests/filename_map.yml
            for rule, matches in filename_map.items():
                if rule == "*":
                    continue
                elif "|" in rule:
                    # This is regex
                    if re.match(rule, str(filename)):
                        for match in matches:
                            test_module_paths.add(_match_to_test_file(match))
                elif "*" in rule or "\\" in rule:
                    # Glob matching
                    for filerule in CODE_DIR.glob(rule):
                        if not filerule.exists():
                            continue
                        filerule = filerule.relative_to(CODE_DIR)
                        if filerule != filename:
                            continue
                        for match in matches:
                            test_module_paths.add(_match_to_test_file(match))
                else:
                    if str(filename) != rule:
                        continue
                    # Direct file paths as rules
                    filerule = pathlib.Path(rule)
                    if not filerule.exists():
                        continue
                    for match in matches:
                        test_module_paths.add(_match_to_test_file(match))
            continue
        else:
            log.debug("Don't know what to do with path %s", filename)

    selected = []
    deselected = []
    for item in items:
        itempath = pathlib.Path(str(item.fspath)).resolve().relative_to(CODE_DIR)
        if itempath in test_module_paths:
            selected.append(item)
        else:
            deselected.append(item)
    items[:] = selected
    if deselected:
        config.hook.pytest_deselected(items=deselected)
# <---- From Filenames Test Selection --------------------------------------------------------------------------------
# ----- Custom Fixtures --------------------------------------------------------------------------------------------->
@pytest.fixture(scope="session")
def reap_stray_processes():
    """
    Session fixture that, after the whole test run, terminates (and, if
    needed, kills) any child processes the test suite left behind.
    """
    # Run tests
    yield

    children = psutil.Process(os.getpid()).children(recursive=True)
    if not children:
        log.info("No astray processes found")
        return

    def on_terminate(proc):
        # Callback invoked by psutil.wait_procs for each process that exits.
        log.debug("Process %s terminated with exit code %s", proc, proc.returncode)

    if children:
        # Reverse the order, siblings first, parents after
        children.reverse()
        log.warning(
            "Test suite left %d astray processes running. Killing those processes:\n%s",
            len(children),
            pprint.pformat(children),
        )

        # First chance: wait briefly for them to exit on their own.
        _, alive = psutil.wait_procs(children, timeout=3, callback=on_terminate)
        for child in alive:
            try:
                child.kill()
            except psutil.NoSuchProcess:
                # The process exited between the wait and the kill attempt.
                continue

        _, alive = psutil.wait_procs(alive, timeout=3, callback=on_terminate)

        if alive:
            # Give up
            for child in alive:
                log.warning(
                    "Process %s survived SIGKILL, giving up:\n%s",
                    child,
                    pprint.pformat(child.as_dict()),
                )
@pytest.fixture(scope="session")
def sminion():
    """Return a session-scoped sminion instance built by ``create_sminion``."""
    minion = create_sminion()
    return minion
@pytest.fixture(scope="session")
def grains(sminion):
    """Return a copy of the sminion's grains dictionary."""
    grains_data = sminion.opts["grains"]
    return grains_data.copy()
@pytest.fixture(scope="module")
def ssl_webserver(integration_files_dir):
    """
    spins up an https webserver.

    Yields a started ``Webserver`` serving ``integration_files_dir`` over
    TLS using the cert/key pair under ``<integration_files_dir>/https``.

    Bug fix vs. the previous revision: ``scope="module"`` was mistakenly a
    parameter of the fixture function instead of an argument to
    ``@pytest.fixture``, leaving the fixture function-scoped and exposing a
    bogus ``scope`` argument to pytest's fixture resolution.
    """
    if sys.version_info < (3, 5, 3):
        pytest.skip("Python versions older than 3.5.3 do not define `ssl.PROTOCOL_TLS`")
    context = ssl.SSLContext(ssl.PROTOCOL_TLS)
    context.load_cert_chain(
        str(integration_files_dir / "https" / "cert.pem"),
        str(integration_files_dir / "https" / "key.pem"),
    )

    webserver = Webserver(root=str(integration_files_dir), ssl_opts=context)
    webserver.start()
    yield webserver
    webserver.stop()
# <---- Custom Fixtures ----------------------------------------------------------------------------------------------
# Known-issues table for the *integration* test suite.
#   'ignore_list' -> patterns for tests skipped at collection time
#                    (consumed via get_list() by pytest_ignore_collect)
#   'xfail_list'  -> patterns for tests collected but marked xfail
#                    (consumed via get_list() by pytest_itemcollected)
# Second-level keys are selectors matched by get_list(): 'common' always
# applies; others match $DISTRO, $FLAVOR, '<distro>/<flavor>',
# '<distro>/<salt_version>' or the bare salt version.
KNOWN_ISSUES_INTEGRATION = {
    'ignore_list': {
        'common': [
            'tests/integration/externalapi/test_venafiapi.py',
            'test_state.py::OrchEventTest::test_parallel_orchestrations',
            'test_state.py::StateModuleTest::test_requisites_onfail_any',
            'files/file/base/*',  # should not be included
            'utils/test_reactor.py',  # not yet implemented
            '*::SaltnadoTestCase::*',  # these are not actual tests
            'cloud/providers/msazure.py',
            'modules/git.py',
            'cloud/helpers/virtualbox.py',
            'utils/*',
            # Running the following tests causes forked processes to close
            # unsuccessfully. This will cause "hanging" jenkins jobs.
            'states/supervisord.py',
            '*::MasterTest::test_exit_status_correct_usage',
            '*::ProxyTest::test_exit_status_correct_usage',
            '*::FileTest::test_issue_2227_file_append',
            '*::FileTest::test_issue_8947_utf8_sls',
            # Evil test
            'reactor/reactor.py',  # This test causes "py.test" to never finish
            # 'runners/fileserver.py::FileserverTest::test_clear_file_list_cache',  # this test hangs
            'runners/fileserver.py',  # workaround for comment above
            # 'wheel/key.py::KeyWheelModuleTest::test_list_all',  # ERROR at teardown
            '*/wheel/key.py',  # workaround for comment above
            '*/wheel/client.py',
            '*/virtualenv.py',
            '*/states/user.py',
            '*states/svn.py',
            '*/kitchen/tests/wordpress/*',
            'pillar/test_git_pillar.py',
            # We are not interested in the NetapiClientTests
            '*/netapi/test_client.py',
            # This makes a request to github.com
            '*/modules/ssh.py',
            # CRON is not installed on toaster images and cron tests are not designed for SUSE.
            '*/states/test_cron.py',
            # NEED INVESTIGATION
            '*rest_tornado/test_app.py::TestSaltAPIHandler::test_multi_local_async_post',
            '*rest_tornado/test_app.py::TestSaltAPIHandler::test_multi_local_async_post_multitoken',
            '*rest_tornado/test_app.py::TestSaltAPIHandler::test_simple_local_async_post',
            '*rest_tornado/test_app.py::TestSaltAPIHandler::test_simple_local_runner_post',
            '*/test_state.py::StateModuleTest::test_onchanges_in_requisite',
            '*/test_state.py::StateModuleTest::test_onchanges_requisite',
            '*/test_state.py::StateModuleTest::test_onchanges_requisite_multiple',
            '*/test_state.py::StateModuleTest::test_requisites_onchanges_any',
            '*/runners/test_state.py::StateRunnerTest::test_orchestrate_retcode',
            '*/shell/test_call.py::CallTest::test_issue_14979_output_file_permissions',
            '*/shell/test_call.py::CallTest::test_issue_15074_output_file_append',
            '*/shell/test_call.py::CallTest::test_issue_2731_masterless',
            '*/modules/ssh.py',
            '*/proxy/test_shell.py',  # proxy minion is not starting
            # After switch to M2Crypto
            'cloud/clouds/test_digitalocean.py',  # ModuleNotFoundError: No module named 'Crypto'
        ],
        'rhel6': [
            # Avoid error due:
            # [Errno 1] _ssl.c:492: error:1409442E:SSL routines:SSL3_READ_BYTES:tlsv1 alert protocol version
            '*/modules/gem.py',
        ],
        # disable 2017.7.1 on python 2.6
        'rhel6/products-next': ['*'],
        'sles11sp3/products-next': ['*'],
        'sles11sp4/products-next': ['*'],
        'sles11sp3': ['*/modules/gem.py', '*/modules/ssh.py'],
        'sles11sp4': ['*/modules/gem.py', '*/modules/ssh.py'],
    },
    'xfail_list': {
        'common': [
            # Always failing
            '*sysmod.py::SysModuleTest::test_valid_docs',
            'cloud/providers/virtualbox.py::BaseVirtualboxTests::test_get_manager',
            'modules/timezone.py::TimezoneLinuxModuleTest::test_get_hwclock',
            'states/git.py::GitTest::test_latest_changed_local_branch_rev_develop',
            'states/git.py::GitTest::test_latest_changed_local_branch_rev_head',
            'states/git.py::GitTest::test_latest_fast_forward',
            'states/git.py::LocalRepoGitTest::test_renamed_default_branch',
            'loader/ext_grains.py::LoaderGrainsTest::test_grains_overwrite',
            'loader/ext_modules.py::LoaderOverridesTest::test_overridden_internal',
            'modules/decorators.py::DecoratorTest::test_depends',
            'modules/decorators.py::DecoratorTest::test_depends_will_not_fallback',
            'modules/decorators.py::DecoratorTest::test_missing_depends_will_fallback',
            # Sometimes failing in jenkins.
            'shell/call.py::CallTest::test_issue_14979_output_file_permissions',
            'shell/call.py::CallTest::test_issue_15074_output_file_append',
            'shell/call.py::CallTest::test_issue_2731_masterless',
            'shell/matcher.py::MatchTest::test_grain',
            'netapi/rest_tornado/test_app.py::TestSaltAPIHandler::test_simple_local_post_only_dictionary_request',
            'shell/master_tops.py::MasterTopsTest::test_custom_tops_gets_utilized',
            'states/svn.py::SvnTest::test_latest',  # sles12sp1
            'states/svn.py::SvnTest::test_latest_empty_dir',  # sles12sp1
            'runners/state.py::StateRunnerTest::test_orchestrate_output',  # sles12sp1 rhel7
            'modules/test_saltutil.py::SaltUtilSyncPillarTest::test_pillar_refresh',  # sles12sp2
            '*::test_issue_7754',
            '*test_fileserver.py::FileserverTest::test_symlink_list',
            '*test_fileserver.py::FileserverTest::test_empty_dir_list',
            '*test_timezone.py::TimezoneLinuxModuleTest::test_get_hwclock',
            '*test_file.py::FileTest::test_managed_check_cmd',
            'modules/test_network.py::NetworkTest::test_network_ping',  # Bad test implementation
            # Needs investigation. Setting them to xfail to have a "new green start" on March 15th
            # see https://github.com/SUSE/spacewalk/issues/14284
            'states/test_match.py::StateMatchTest::test_issue_2167_ipcidr_no_AttributeError',
            'states/test_file.py::FileTest::test_directory_broken_symlink',
            'shell/test_matcher.py::MatchTest::test_ipcidr',
            'netapi/rest_cherrypy/test_app.py::TestJobs::test_all_jobs',
            'netapi/rest_cherrypy/test_app.py::TestAuth::test_webhook_auth',
            'modules/test_saltutil.py::SaltUtilModuleTest::test_wheel_just_function',
            'modules/test_network.py::NetworkTest::test_network_netstat',
            'modules/test_cp.py::CPModuleTest::test_get_dir_templated_paths',
            'modules/test_cmdmod.py::CMDModuleTest::test_script_retcode',
            'modules/test_cmdmod.py::CMDModuleTest::test_script_cwd_with_space',
            'modules/test_cmdmod.py::CMDModuleTest::test_script_cwd',
            'modules/test_cmdmod.py::CMDModuleTest::test_script',
            'modules/test_cmdmod.py::CMDModuleTest::test_has_exec',
            'modules/test_cmdmod.py::CMDModuleTest::test_exec_code_with_single_arg',
            'modules/test_cmdmod.py::CMDModuleTest::test_exec_code_with_multiple_args',
            'modules/test_cmdmod.py::CMDModuleTest::test_exec_code',
            # Failing in 3003.3
            'modules/saltutil/test_wheel.py::test_wheel_just_function',
            'modules/test_pip.py::PipModuleTest::test_pip_install_multiple_editables',
            'states/test_pip_state.py::PipStateTest::test_issue_2028_pip_installed_state',
            'cli/test_matcher.py::test_ipcidr',
        ],
        'rhel6': [
            'cloud/providers/virtualbox.py::CreationDestructionVirtualboxTests::test_vm_creation_and_destruction',
            'cloud/providers/virtualbox.py::CloneVirtualboxTests::test_create_machine',
            'cloud/providers/virtualbox.py::BootVirtualboxTests::test_start_stop',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_nonexistent_attribute_with_default',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_nonexistent_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_imachine_object_default',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_override_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_unknown_object',
            'fileserver/roots_test.py::RootsTest::test_symlink_list',
        ],
        'rhel7': [
            'states/archive.py::ArchiveTest::test_archive_extracted_skip_verify',
            'states/archive.py::ArchiveTest::test_archive_extracted_with_root_user_and_group',
            'states/archive.py::ArchiveTest::test_archive_extracted_with_source_hash',
        ],
        'sles11sp3': [
            'cloud/providers/virtualbox.py::CreationDestructionVirtualboxTests::test_vm_creation_and_destruction',
            'cloud/providers/virtualbox.py::CloneVirtualboxTests::test_create_machine',
            'cloud/providers/virtualbox.py::BootVirtualboxTests::test_start_stop',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_nonexistent_attribute_with_default',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_nonexistent_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_imachine_object_default',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_override_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_unknown_object',
            'fileserver/roots_test.py::RootsTest::test_symlink_list',
        ],
        'sles11sp4': [
            'cloud/providers/virtualbox.py::CreationDestructionVirtualboxTests::test_vm_creation_and_destruction',
            'cloud/providers/virtualbox.py::CloneVirtualboxTests::test_create_machine',
            'cloud/providers/virtualbox.py::BootVirtualboxTests::test_start_stop',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_nonexistent_attribute_with_default',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_nonexistent_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_imachine_object_default',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_override_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_unknown_object',
            'shell/master.py::MasterTest::test_exit_status_correct_usage',
            'states/git.py::GitTest::test_config_set_value_with_space_character',
            'states/git.py::GitTest::test_latest',
            'states/git.py::GitTest::test_latest_changed_local_branch_rev_develop',
            'states/git.py::GitTest::test_latest_changed_local_branch_rev_head',
            'states/git.py::GitTest::test_latest_empty_dir',
            'states/git.py::GitTest::test_latest_unless_no_cwd_issue_6800',
            'states/git.py::GitTest::test_latest_with_local_changes',
            'states/git.py::GitTest::test_latest_with_rev_and_submodules',
            'states/git.py::GitTest::test_numeric_rev',
            'fileserver/roots_test.py::RootsTest::test_symlink_list',
        ],
        'sles12': [
        ],
        'sles12sp1': [
        ],
        'sles12sp2': [
        ],
        'sles12sp3': [
            'modules/test_pkg.py::PkgModuleTest::test_mod_del_repo_multiline_values',  # this test should not be executed on SUSE systems
        ],
        'sles15': [
            'modules/test_pkg.py::PkgModuleTest::test_mod_del_repo_multiline_values',  # this test should not be executed on SUSE systems
        ],
        'ubuntu1604': [
            'shell/test_enabled.py::EnabledTest::test_shell_default_enabled',  # https://github.com/saltstack/salt/issues/52898
            'shell/test_enabled.py::EnabledTest::test_template_shell',  # https://github.com/saltstack/salt/issues/52898
        ],
        'ubuntu1804': [
            'shell/test_enabled.py::EnabledTest::test_shell_default_enabled',  # https://github.com/saltstack/salt/issues/52898
            'shell/test_enabled.py::EnabledTest::test_template_shell',  # https://github.com/saltstack/salt/issues/52898
        ],
    }
}
# Known-issues table for the *unit* test suite; same structure and selector
# semantics as KNOWN_ISSUES_INTEGRATION above (resolved through get_list()).
KNOWN_ISSUES_UNIT = {
    'ignore_list': {
        'common': [
            'test_engines.py',  # Makes pytest get stuck for a long time after tests are executed
            'modules/test_boto3_elasticsearch.py',
            'zypp_plugins_test.py',  # BogusIO missing in zypp_plugin
            'netapi/rest_tornado/test_handlers.py',
            'netapi/test_rest_tornado.py',
            'returners/smtp_return_test.py',
            'transport/zeromq_test.py',  # Prevent pytests hang after tests
            'conf_test.py::ConfTest::test_conf_master_sample_is_commented',  # we have uncommented custom config
            'conf_test.py::ConfTest::test_conf_minion_sample_is_commented',  # we have uncommented custom config
            'conf_test.py::ConfTest::test_conf_proxy_sample_is_commented',  # we have uncommented custom config
            '*rsync_test.py::*',
            'test_module_names.py',
            'modules/darwin_sysctl_test.py',
            'states/boto_cloudwatch_event_test.py',
            'modules/boto_vpc_test.py',
            'states/boto_vpc_test.py',
            'utils/boto_test.py',
            'modules/win_ip_test.py::WinShadowTestCase::test_set_static_ip',  # takes too long to execute
            'states/blockdev_test.py::BlockdevTestCase::test_formatted',  # takes too long to execute
            'cloud/clouds/dimensiondata_test.py',
            'cloud/clouds/gce_test.py',
            '*/utils/test_parsers.py',
            '*/kitchen/tests/wordpress/*',
            'fileserver/test_gitfs.py',
            # NEEDS INVESTIGATION
            'test_pip.py::PipStateTest::test_install_requirements_parsing',
            '*/modules/test_useradd.py',
            'utils/cache_mods/cache_mod.py',
            'modules/test_boto_vpc.py',
            'states/test_boto_vpc.py',
            'states/test_augeas.py::AugeasTestCase::test_change_no_context_with_full_path_fail',
            # Not running tests for cheetah, mako and genshi templating
            'utils/test_templates.py::RenderTestCase::test_render_cheetah_evaluate',
            'utils/test_templates.py::RenderTestCase::test_render_cheetah_evaluate_text',
            'utils/test_templates.py::RenderTestCase::test_render_cheetah_evaluate_xml',
            'utils/test_templates.py::RenderTestCase::test_render_cheetah_sanity',
            'utils/test_templates.py::RenderTestCase::test_render_cheetah_variable',
            'utils/test_templates.py::RenderTestCase::test_render_genshi_evaluate',
            'utils/test_templates.py::RenderTestCase::test_render_genshi_evaluate_condition',
            'utils/test_templates.py::RenderTestCase::test_render_genshi_sanity',
            'utils/test_templates.py::RenderTestCase::test_render_genshi_variable',
            'utils/test_templates.py::RenderTestCase::test_render_genshi_variable_replace',
            'utils/test_templates.py::RenderTestCase::test_render_mako_evaluate',
            'utils/test_templates.py::RenderTestCase::test_render_mako_evaluate_multi',
            'utils/test_templates.py::RenderTestCase::test_render_mako_sanity',
            'utils/test_templates.py::RenderTestCase::test_render_mako_variable',
            # This produces a bad file descriptor error at the end of the testsuite, even if the tests passes
            'utils/test_thin.py::SSHThinTestCase::test_gen_thin_compression_fallback_py3',
            # contain NO_MOCK which does not exist anymore (throws ImportError)
            'cli/test_support.py',
            'modules/test_saltsupport.py',
            'utils/test_pkg.py',
            # duplicated test file, should be removed in favor of the one in tests/pytests/
            'tests/unit/modules/test_ansiblegate.py',
            # has a broken test, adding it to xfail does not work because it conflicts with tests/unit/utils/test_thin.py
            'pytests/unit/utils/test_thin.py',
            'transport/test_zeromq.py',  # Leaks memory on SLE15SP2
            'transport/test_tcp.py',
            # Errors in 3003.3
            'cloud/test_map.py'
        ],
        'sles11sp4': [
            # SSLError: [Errno 1] _ssl.c:492: error:1409442E:SSL routines:SSL3_READ_BYTES:tlsv1 alert protocol version
            'modules/random_org_test.py',
            'states/test_saltutil.py',
        ],
        'rhel6': [
            # SSLError: [Errno 1] _ssl.c:492: error:1409442E:SSL routines:SSL3_READ_BYTES:tlsv1 alert protocol version
            'modules/random_org_test.py',
            'states/test_saltutil.py',
        ],
        'sles15': [
            'utils/cache_mods/cache_mod.py',
            'test_zypp_plugins.py',
            'modules/test_yumpkg.py',
        ],
    },
    'xfail_list': {
        'common': [
            # fixed in saltstack/develop
            # https://github.com/saltstack/salt/commit/7427e192baeccfee69b4887fe0c630a1afb38730#diff-3b5d15bc59b82fc8d4b15f819babf4faR70
            'test_core.py::CoreGrainsTestCase::test_parse_etc_os_release',
            'test_core.py::CoreGrainsTestCase::test_fqdns_socket_error',
            'test_x509.py::X509TestCase::test_private_func__parse_subject',
            'test_zypper.py::ZypperTestCase::test_list_pkgs_with_attr',
            'test_zfs.py::ZfsUtilsTestCase::test_property_data_zpool',
            'templates/jinja_test.py::TestCustomExtensions::test_serialize_yaml_unicode',
            # not working in docker containers
            'modules/cmdmod_test.py::CMDMODTestCase::test_run',
            'conf_test.py::ConfTest::test_conf_cloud_maps_d_files_are_commented',
            'conf_test.py::ConfTest::test_conf_cloud_profiles_d_files_are_commented',
            'conf_test.py::ConfTest::test_conf_cloud_providers_d_files_are_commented',
            'utils/extend_test.py::ExtendTestCase::test_run',
            'beacons/glxinfo.py::GLXInfoBeaconTestCase::test_no_user',
            'beacons/glxinfo.py::GLXInfoBeaconTestCase::test_non_dict_config',
            # Boto failing tests
            'modules/boto_apigateway_test.py::BotoApiGatewayTestCaseBase::runTest',
            'modules/boto_cloudwatch_event_test.py::BotoCloudWatchEventTestCaseBase::runTest',
            'modules/boto_cognitoidentity_test.py::BotoCognitoIdentityTestCaseBase::runTest',
            'modules/boto_elasticsearch_domain_test.py::BotoElasticsearchDomainTestCaseBase::runTest',
            'states/boto_apigateway_test.py::BotoApiGatewayStateTestCaseBase::runTest',
            'states/boto_cognitoidentity_test.py::BotoCognitoIdentityStateTestCaseBase::runTest',
            'states/boto_elasticsearch_domain_test.py::BotoElasticsearchDomainStateTestCaseBase::runTest',
            'modules/inspect_collector_test.py::InspectorCollectorTestCase::test_file_tree',
            '*CoreGrainsTestCase::test_linux_memdata',
            'EtcdModTestCase',
            'ConfTest::test_conf_master_sample_is_commented',  # this is not passing because we have custom config by default (user "salt")
            'test_cmdmod.py::CMDMODTestCase::test_run',
            'fileserver/test_roots.py::RootsTest::test_symlink_list',
            'modules/test_cmdmod.py::CMDMODTestCase::test_run',  # test too slow
            '*test_reactor.py::TestReactor::test_reactions',
            '*test_reactor.py::TestReactor::test_list_reactors',
            '*test_yumpkg.py::YumTestCase::test_list_pkgs_with_attr',
            '*test_local_cache.py::Local_CacheTest::test_clean_old_jobs',
            '*test_local_cache.py::Local_CacheTest::test_not_clean_new_jobs',
            '*test_jinja.py::TestCustomExtensions::test_http_query',
            '*test_conf.py::ConfTest::test_conf_master_sample_is_commented',
            # After switch to M2Crypto
            'modules/test_x509.py::X509TestCase::test_create_crl',  # No OpenSSL available
            'modules/test_x509.py::X509TestCase::test_revoke_certificate_with_crl',  # No OpenSSL available
            # Fails due to the async batch changes
            'transport/test_ipc.py::IPCMessagePubSubCase::test_multi_client_reading',
            # Needs investigation. Setting them to xfail to have a "new green start" on March 12th
            # https://github.com/SUSE/spacewalk/issues/14263
            'utils/test_jinja.py::TestCustomExtensions::test_json_query',
            'utils/test_data.py::DataTestCase::test_json_query',
            'states/test_syslog_ng.py::SyslogNGTestCase::test_started_state_generate_valid_cli_command',
            'states/test_pip_state.py::PipStateTest::test_install_requirements_parsing',
            'states/test_network.py::NetworkTestCase::test_managed',
            'modules/test_zypperpkg.py::ZypperTestCase::test_upgrade_success',
            'modules/test_zypperpkg.py::ZypperTestCase::test_search_not_found',
            'modules/test_zypperpkg.py::ZypperTestCase::test_add_repo_key_path',
            'modules/test_state.py::StateTestCase::test_show_sls',
            'modules/test_serverdensity_device.py::ServerdensityDeviceTestCase::test_create',
            'modules/test_redismod.py::RedismodTestCase::test_shutdown',
            'modules/test_redismod.py::RedismodTestCase::test_ping',
            'modules/test_netscaler.py::NetscalerTestCase::test_service_enable',
            'modules/test_netscaler.py::NetscalerTestCase::test_service_disable',
            'modules/test_keystone.py::KeystoneTestCase::test_user_get',
            'modules/test_keystone.py::KeystoneTestCase::test_user_create',
            'modules/test_keystone.py::KeystoneTestCase::test_tenant_get',
            'modules/test_keystone.py::KeystoneTestCase::test_tenant_create',
            'modules/test_keystone.py::KeystoneTestCase::test_role_get',
            'modules/test_dpkg_lowpkg.py::DpkgTestCase::test_info',
            'modules/test_cron.py::PsTestCase::test_list_tab',
            'modules/test_aptpkg.py::AptPkgTestCase::test_info_installed_attr_without_status',
            'grains/test_core.py::CoreGrainsTestCase::test_fqdn_return',
            'grains/test_core.py::CoreGrainsTestCase::test_fqdn4_empty',
            'cloud/clouds/test_ec2.py::EC2TestCase::test_termination_protection_exception',
            'cloud/clouds/test_ec2.py::EC2TestCase::test_termination_protection',
            'cli/test_batch_async.py::AsyncBatchTestCase::test_batch_start_on_gather_job_timeout',
            'cli/test_batch_async.py::AsyncBatchTestCase::test_batch_start_on_batch_presence_ping_timeout',
            'cli/test_batch_async.py::AsyncBatchTestCase::test_batch_next',
            'cli/test_batch_async.py::AsyncBatchTestCase::test_batch_close_safe',
            'cli/test_batch_async.py::AsyncBatchTestCase::test_batch__del__',
            'beacons/test_cert_info.py::CertInfoBeaconTestCase::test_cert_information',
            # These also need investigation, setting to xfail for a green start for 3002.2
            'test_ext.py::VendorTornadoTest::test_vendored_tornado_import',
            'test_loader.py::LoaderGlobalsTest::test_auth',
            'test_loader.py::LoaderGlobalsTest::test_outputters',
            'test_loader.py::LoaderGlobalsTest::test_pillars',
            'test_loader.py::LoaderGlobalsTest::test_renderers',
            'test_loader.py::LoaderGlobalsTest::test_returners',
            'test_loader.py::LoaderGlobalsTest::test_runners',
            'test_loader.py::LoaderGlobalsTest::test_serializers',
            'test_loader.py::LoaderGlobalsTest::test_tops',
            'grains/test_core.py::CoreGrainsTestCase::test_core_virtual_invalid',
            'grains/test_core.py::CoreGrainsTestCase::test_core_virtual_unicode',
            'grains/test_core.py::CoreGrainsTestCase::test_get_server_id',
            'modules/test_aptpkg.py::AptPkgTestCase::test_add_repo_key_failed',
            'modules/test_aptpkg.py::AptPkgTestCase::test_list_repos',
            'modules/test_parted_partition.py::PartedTestCase::test__is_fstype',
            'modules/test_parted_partition.py::PartedTestCase::test_mkpartfs_to_mkpart',
            'modules/test_zypperpkg.py::ZypperTestCase::test_list_pkgs_with_attr',
            'utils/test_vmware.py::PrivateGetServiceInstanceTestCase::test_second_attempt_successful_connection',
            'utils/test_vmware.py::PrivateGetServiceInstanceTestCase::test_third_attempt_successful_connection',
            'utils/test_vmware.py::GetServiceInstanceTestCase::test_default_params',
            'utils/test_vmware.py::GetServiceInstanceTestCase::test_no_cached_service_instance_same_host_on_proxy',
            'utils/test_vmware.py::GetServiceInstanceTestCase::test_uncached_service_instance',
            'pytests/unit/modules/test_ansiblegate.py::test_ansible_module_call',
            # Failing on 3003.3
            'beacons/test_telegram_bot_msg.py::TelegramBotMsgBeaconTestCase::test_call_no_updates',
            'beacons/test_telegram_bot_msg.py::TelegramBotMsgBeaconTestCase::test_call_telegram_return_no_updates_for_user',
            'beacons/test_telegram_bot_msg.py::TelegramBotMsgBeaconTestCase::test_call_telegram_returning_updates',
            'modules/test_junos.py::Test_Junos_Module::test_get_table_api_error',
            'modules/test_junos.py::Test_Junos_Module::test_get_table_connect_closed_error',
            'modules/test_junos.py::Test_Junos_Module::test_get_table_inventory',
            'modules/test_junos.py::Test_Junos_Module::test_get_table_no_path_inventory',
            'modules/test_zcbuildout.py::BuildoutTestCase::test_get_bootstrap_url',
            'modules/test_zcbuildout.py::BuildoutTestCase::test_get_buildout_ver',
            'modules/test_zfs.py::ZfsTestCase::test_bookmark_success',
            'modules/test_aptpkg.py::AptPkgTestCase::test_expand_repo_def',
            'modules/test_cmdmod.py::test_run_cwd_in_combination_with_runas',  # Fails on docker container
            'states/test_pkgrepo.py::test_migrated_wrong_method',
        ],
        'sles12sp1': [
            'cloud/clouds/dimensiondata_test.py::DimensionDataTestCase::test_avail_sizes',
        ],
        'sles12sp2': [
            'cloud/clouds/dimensiondata_test.py::DimensionDataTestCase::test_avail_sizes',
        ],
        '2016.11.4': [
            '*network_test.py::NetworkTestCase::test_host_to_ips',
        ],
        'sles15': [
            'utils/test_args.py::ArgsTestCase::test_argspec_report',  # Bad tests, fixed at https://github.com/saltstack/salt/pull/52852
        ],
        'ubuntu1604': [
            'utils/test_args.py::ArgsTestCase::test_argspec_report',  # Bad tests, fixed at https://github.com/saltstack/salt/pull/52852
            # Needs investigation. Setting them to xfail to have a "new green start" on March 19th
            # https://github.com/SUSE/spacewalk/issues/14263
            'modules/test_saltsupport.py::SaltSupportModuleTestCase::test_sync_specified_archive_not_found_failure',
            'modules/test_saltsupport.py::SaltSupportModuleTestCase::test_sync_last_picked_archive_not_found_failure',
            'modules/test_aptpkg.py::AptPkgTestCase::test_add_repo_key_failed',
            'cli/test_support.py::ProfileIntegrityTestCase::test_users_template_profile',
            'cli/test_support.py::ProfileIntegrityTestCase::test_non_template_profiles_parseable',
            'cli/test_support.py::ProfileIntegrityTestCase::test_jobs_trace_template_profile',
            'transport/test_zeromq.py::PubServerChannel::test_issue_36469_tcp',
        ],
        'ubuntu1804': [
            'utils/test_args.py::ArgsTestCase::test_argspec_report',  # Bad tests, fixed at https://github.com/saltstack/salt/pull/52852
            # Needs investigation. Setting them to xfail to have a "new green start" on March 19th
            # https://github.com/SUSE/spacewalk/issues/14263
            'modules/test_saltsupport.py::SaltSupportModuleTestCase::test_sync_specified_archive_not_found_failure',
            'modules/test_saltsupport.py::SaltSupportModuleTestCase::test_sync_last_picked_archive_not_found_failure',
            'modules/test_aptpkg.py::AptPkgTestCase::test_add_repo_key_failed',
            'cli/test_support.py::ProfileIntegrityTestCase::test_users_template_profile',
            'cli/test_support.py::ProfileIntegrityTestCase::test_non_template_profiles_parseable',
            'cli/test_support.py::ProfileIntegrityTestCase::test_jobs_trace_template_profile',
            # These also need investigation, setting to xfail for a green start for 3002.2
            'transport/test_tcp.py::ClearReqTestCases::test_badload',
            'transport/test_tcp.py::ClearReqTestCases::test_basic',
            'transport/test_tcp.py::ClearReqTestCases::test_normalization',
            'transport/test_tcp.py::AESReqTestCases::test_basic',
            'transport/test_tcp.py::AESReqTestCases::test_normalization',
            'transport/test_zeromq.py::ClearReqTestCases::test_badload',
            'transport/test_zeromq.py::ClearReqTestCases::test_basic',
            'transport/test_zeromq.py::ClearReqTestCases::test_normalization',
        ],
        # ip_addrs() needs to be mocked for deterministic tests
        "opensuse151": ['pytests/unit/utils/test_minions.py'],
        "opensuse152": ['pytests/unit/utils/test_minions.py'],
        "opensuse153": ['pytests/unit/utils/test_minions.py'],
    }
}
# Top-level dispatch table: selects the known-issues set by the 'tests_type'
# ini value ('integration' or 'unit') inside get_list().
KNOWN_ISSUES = {
    'integration': KNOWN_ISSUES_INTEGRATION,
    'unit': KNOWN_ISSUES_UNIT
}
def get_list(config, name):
    """
    Resolve the known-issues pattern list called *name* ('ignore_list' or
    'xfail_list') for the current environment.

    The DISTRO/FLAVOR environment variables, the 'tests_type' ini value and
    the detected salt version select which sub-lists of KNOWN_ISSUES apply.
    Every entry is returned wrapped in '*...*' so it can be used as an
    fnmatch pattern against arbitrary node ids/paths.
    """
    version = os.environ.get('DISTRO')
    flavor = os.environ.get('FLAVOR')
    tests_type = config.getini('tests_type')
    assert name in ['ignore_list', 'xfail_list']
    known = KNOWN_ISSUES[tests_type][name]
    # Selector keys, from most generic to most specific; missing keys
    # contribute nothing.
    selectors = (
        'common',
        flavor,
        version,
        '{0}/{1}'.format(version, flavor),
        '{0}/{1}'.format(version, config.salt_version),
        config.salt_version,
    )
    patterns = []
    for selector in selectors:
        patterns.extend(known.get(selector, []))
    return ['*%s*' % pattern for pattern in patterns]
def pytest_ignore_collect(path, config):
    """
    Skip collection of any path matching one of the toaster ignore patterns.
    """
    return any(path.fnmatch(pattern) for pattern in config.ignore_list)
def pytest_itemcollected(item):
    """
    Mark collected items against the toaster known-issues lists: xfail takes
    precedence over skip when a node id matches both.
    """
    matcher = partial(fnmatch, item.nodeid)
    # BUG FIX: the reason must be passed *inside* the mark.  Previously it was
    # passed as a second positional argument to add_marker(), where it was
    # interpreted as the (truthy) `append` flag and the reason text was lost.
    if any(map(matcher, item.config.xfail_list)):
        item.add_marker(pytest.mark.xfail(reason="Xfailed by toaster"))
    elif any(map(matcher, item.config.ignore_list)):
        item.add_marker(pytest.mark.skip(reason="Ignore by toaster"))
| """
:codeauthor: <NAME> (<EMAIL>)
tests.conftest
~~~~~~~~~~~~~~
Prepare py.test for our test suite
"""
# pylint: disable=wrong-import-order,wrong-import-position,3rd-party-local-module-not-gated
# pylint: disable=redefined-outer-name,invalid-name,3rd-party-module-not-gated
import logging
import os
import pathlib
import pprint
import re
import shutil
import ssl
import stat
import sys
import textwrap
from functools import partial, wraps
from unittest import TestCase # pylint: disable=blacklisted-module
import _pytest.logging
import _pytest.skipping
import psutil
import pytest
import salt._logging.impl
import salt.config
import salt.loader
import salt.log.mixins
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.win_functions
import salt.version
import saltfactories.utils.compat
from salt.serializers import yaml
from salt.utils.immutabletypes import freeze
from tests.support.helpers import (
PRE_PYTEST_SKIP_OR_NOT,
PRE_PYTEST_SKIP_REASON,
Webserver,
get_virtualenv_binary_path,
)
from tests.support.pytest.helpers import * # pylint: disable=unused-wildcard-import
from tests.support.runtests import RUNTIME_VARS
from tests.support.sminion import check_required_sminion_attributes, create_sminion
#
# Toaster specifics
import glob
from fnmatch import fnmatch
# Base directories; computed from the current working directory at import time,
# so this conftest presumably must be imported from the checkout root — TODO confirm.
TESTS_DIR = pathlib.Path.cwd() / "tests"
PYTESTS_DIR = TESTS_DIR / "pytests"
CODE_DIR = TESTS_DIR.parent
# Change to code checkout directory
os.chdir(str(CODE_DIR))
# Make sure the current directory is the first item in sys.path
if str(CODE_DIR) in sys.path:
    sys.path.remove(str(CODE_DIR))
sys.path.insert(0, str(CODE_DIR))
# Coverage: decide whether subprocess coverage tracking should be enabled and
# which .coveragerc to use.
if "COVERAGE_PROCESS_START" in os.environ:
    MAYBE_RUN_COVERAGE = True
    COVERAGERC_FILE = os.environ["COVERAGE_PROCESS_START"]
else:
    COVERAGERC_FILE = str(CODE_DIR / ".coveragerc")
    MAYBE_RUN_COVERAGE = (
        sys.argv[0].endswith("pytest.py") or "_COVERAGE_RCFILE" in os.environ
    )
if MAYBE_RUN_COVERAGE:
    # Flag coverage to track subprocesses by pointing it to the right .coveragerc file
    os.environ["COVERAGE_PROCESS_START"] = str(COVERAGERC_FILE)
# Define the pytest plugins we rely on
pytest_plugins = ["tempdir", "helpers_namespace"]
# Define where not to collect tests from
collect_ignore = ["setup.py"]
# Patch PyTest logging handlers
class LogCaptureHandler(
    salt.log.mixins.ExcInfoOnLogLevelFormatMixIn, _pytest.logging.LogCaptureHandler
):
    """
    Subclassing PyTest's LogCaptureHandler in order to add the
    exc_info_on_loglevel functionality and actually make it a NullHandler,
    it's only used to print log messages emitted during tests, which we
    have explicitly disabled in pytest.ini
    """


# Replace pytest's handler class in place so every capture handler pytest
# creates from here on gets the exc_info_on_loglevel behavior.
_pytest.logging.LogCaptureHandler = LogCaptureHandler
class LiveLoggingStreamHandler(
    salt.log.mixins.ExcInfoOnLogLevelFormatMixIn,
    _pytest.logging._LiveLoggingStreamHandler,
):
    """
    Subclassing PyTest's LiveLoggingStreamHandler in order to add the
    exc_info_on_loglevel functionality.
    """


# Monkeypatch pytest's private live-logging handler class (NOTE(review):
# relies on a pytest-internal name — verify against the pinned pytest version).
_pytest.logging._LiveLoggingStreamHandler = LiveLoggingStreamHandler
# Reset logging root handlers (iterate over a copy, since removeHandler
# mutates the list being iterated).
for handler in logging.root.handlers[:]:
    logging.root.removeHandler(handler)

# Reset the root logger to its default level (because salt changed it)
logging.root.setLevel(logging.WARNING)

# Logger used throughout this conftest.
log = logging.getLogger("salt.testsuite")
# ----- PyTest Tempdir Plugin Hooks --------------------------------------------------------------------------------->
def pytest_tempdir_basename():
    """
    Basename used by the tempdir plugin for this suite's temporary directory.
    """
    basename = "salt-tests-tmpdir"
    return basename
# <---- PyTest Tempdir Plugin Hooks ----------------------------------------------------------------------------------
# ----- CLI Options Setup ------------------------------------------------------------------------------------------->
def pytest_addoption(parser):
    """
    Register the suite's argparse-style CLI options and ini-style config
    values with pytest's option parser.
    """
    selection_group = parser.getgroup("Tests Selection")
    output_group = parser.getgroup("Output Options")

    # Test selection options.
    selection_group.addoption(
        "--from-filenames",
        default=None,
        help=(
            "Pass a comma-separated list of file paths, and any test module which corresponds to the "
            "specified file(s) will run. For example, if 'setup.py' was passed, then the corresponding "
            "test files defined in 'tests/filename_map.yml' would run. Absolute paths are assumed to be "
            "files containing relative paths, one per line. Providing the paths in a file can help get "
            "around shell character limits when the list of files is long."
        ),
    )
    # Add deprecated CLI flag until we completely switch to PyTest
    selection_group.addoption(
        "--names-file", default=None, help="Deprecated option"
    )
    selection_group.addoption(
        "--transport",
        default="zeromq",
        choices=("zeromq", "tcp"),
        help=(
            "Select which transport to run the integration tests with, zeromq or tcp. Default: %(default)s"
        ),
    )
    selection_group.addoption(
        "--ssh",
        "--ssh-tests",
        dest="ssh",
        action="store_true",
        default=False,
        help="Run salt-ssh tests. These tests will spin up a temporary "
        "SSH server on your machine. In certain environments, this "
        "may be insecure! Default: False",
    )
    selection_group.addoption(
        "--proxy",
        "--proxy-tests",
        dest="proxy",
        action="store_true",
        default=False,
        help="Run proxy tests",
    )
    selection_group.addoption(
        "--run-slow", action="store_true", default=False, help="Run slow tests.",
    )

    # Output options.
    output_group.addoption(
        "--output-columns",
        default=80,
        type=int,
        help="Number of maximum columns to use on the output",
    )
    output_group.addoption(
        "--no-colors",
        "--no-colours",
        default=False,
        action="store_true",
        help="Disable colour printing.",
    )

    # ----- Test Groups --------------------------------------------------------------------------------------------->
    # This will allow running the tests in chunks
    selection_group.addoption(
        "--test-group-count",
        dest="test-group-count",
        type=int,
        help="The number of groups to split the tests into",
    )
    selection_group.addoption(
        "--test-group",
        dest="test-group",
        type=int,
        help="The group of tests that should be executed",
    )
    # <---- Test Groups ----------------------------------------------------------------------------------------------

    # Toaster specific
    parser.addini("tests_type", help="Type of the tests being run", default='unit')
# <---- CLI Options Setup --------------------------------------------------------------------------------------------
# ----- Register Markers -------------------------------------------------------------------------------------------->
@pytest.mark.trylast
def pytest_configure(config):
    """
    Pytest hook: called after command line options have been parsed
    and all plugins and initial conftest files been loaded.

    Registers the custom markers used by this suite, keeps pytest from
    recursing into non-test directories, and exposes runtime settings
    (slow-test flag, salt version, xfail/ignore lists) to the rest of
    the suite.
    """
    # Exclude every top-level repository directory except the tests dir
    # from collection.
    for dirname in CODE_DIR.iterdir():
        if not dirname.is_dir():
            continue
        if dirname != TESTS_DIR:
            # NOTE(review): ``dirname`` is already absolute (it comes from
            # ``CODE_DIR.iterdir()``), so ``CODE_DIR / dirname`` collapses to
            # ``dirname`` — harmless but redundant; confirm before changing.
            config.addinivalue_line("norecursedirs", str(CODE_DIR / dirname))
    # Expose the markers we use to pytest CLI
    config.addinivalue_line(
        "markers",
        "requires_salt_modules(*required_module_names): Skip if at least one module is not available.",
    )
    config.addinivalue_line(
        "markers",
        "requires_salt_states(*required_state_names): Skip if at least one state module is not available.",
    )
    config.addinivalue_line(
        "markers", "windows_whitelisted: Mark test as whitelisted to run under Windows"
    )
    config.addinivalue_line(
        "markers", "requires_sshd_server: Mark test that require an SSH server running"
    )
    # Make sure the test suite "knows" this is a pytest test run
    RUNTIME_VARS.PYTEST_SESSION = True
    # "Flag" the slotTest decorator if we're skipping slow tests or not
    os.environ["SLOW_TESTS"] = str(config.getoption("--run-slow"))
    # Toaster specific
    config.salt_version = salt.version.__version__
    config.xfail_list = get_list(config, 'xfail_list')
    config.ignore_list = get_list(config, 'ignore_list')
# <---- Register Markers ---------------------------------------------------------------------------------------------
# ----- PyTest Tweaks ----------------------------------------------------------------------------------------------->
def set_max_open_files_limits(min_soft=3072, min_hard=4096):
    """
    Make sure this process may open at least *min_soft*/*min_hard* files.

    Queries the current max-open-files limits (``RLIMIT_NOFILE`` on POSIX,
    the CRT stdio limit on Windows), raises them when below the requested
    minimums, and returns the effective ``(soft, hard)`` pair.  Aborts the
    test run when the limits cannot be raised.
    """
    # Get current limits
    if salt.utils.platform.is_windows():
        import win32file

        prev_hard = win32file._getmaxstdio()
        # Windows has no soft/hard distinction; 512 is the CRT default
        prev_soft = 512
    else:
        import resource

        prev_soft, prev_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    # Check minimum required limits
    set_limits = False
    if prev_soft < min_soft:
        soft = min_soft
        set_limits = True
    else:
        soft = prev_soft
    if prev_hard < min_hard:
        hard = min_hard
        set_limits = True
    else:
        hard = prev_hard
    # Increase limits
    if set_limits:
        log.debug(
            " * Max open files settings is too low (soft: %s, hard: %s) for running the tests. "
            "Trying to raise the limits to soft: %s, hard: %s",
            prev_soft,
            prev_hard,
            soft,
            hard,
        )
        try:
            if salt.utils.platform.is_windows():
                # _setmaxstdio caps out at 2048 handles
                hard = 2048 if hard > 2048 else hard
                win32file._setmaxstdio(hard)
            else:
                resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
        except Exception as err:  # pylint: disable=broad-except
            # BUGFIX: the limit being raised is the number of open files,
            # which is 'ulimit -n'; 'ulimit -u' is max user processes.
            log.error(
                "Failed to raise the max open files settings -> %s. Please issue the following command "
                "on your console: 'ulimit -n %s'",
                err,
                soft,
            )
            exit(1)
    return soft, hard
def pytest_report_header():
    """Report the effective max-open-files limits in the pytest header."""
    soft_limit, hard_limit = set_max_open_files_limits()
    return f"max open files; soft: {soft_limit}; hard: {hard_limit}"
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_collection_modifyitems(config, items):
    """
    called after collection has been performed, may filter or re-order
    the items in-place.
    :param _pytest.main.Session session: the pytest session object
    :param _pytest.config.Config config: pytest config object
    :param List[_pytest.nodes.Item] items: list of item objects

    After the default collection, applies test-group and --from-filenames
    filtering, then wraps the ``finish`` method of every package-scoped
    fixture so teardown only happens once the *next* test item is outside
    the fixture's package (driven from pytest_runtest_protocol, which
    passes ``nextitem`` through).
    """
    # Let PyTest or other plugins handle the initial collection
    yield
    groups_collection_modifyitems(config, items)
    from_filenames_collection_modifyitems(config, items)
    # BUGFIX: corrected log message typo ("Mofifying" -> "Modifying")
    log.warning("Modifying collected tests to keep track of fixture usage")
    for item in items:
        for fixture in item.fixturenames:
            if fixture not in item._fixtureinfo.name2fixturedefs:
                continue
            for fixturedef in item._fixtureinfo.name2fixturedefs[fixture]:
                if fixturedef.scope != "package":
                    continue
                try:
                    # Already wrapped on a previous pass — skip
                    fixturedef.finish.__wrapped__
                except AttributeError:
                    original_func = fixturedef.finish

                    def wrapper(func, fixturedef):
                        @wraps(func)
                        def wrapped(self, request, nextitem=False):
                            try:
                                # Teardown already ran for this fixturedef
                                return self._finished
                            except AttributeError:
                                if nextitem:
                                    fpath = pathlib.Path(self.baseid).resolve()
                                    tpath = pathlib.Path(
                                        nextitem.fspath.strpath
                                    ).resolve()
                                    try:
                                        tpath.relative_to(fpath)
                                        # The test module is within the same package that the fixture is
                                        if (
                                            not request.session.shouldfail
                                            and not request.session.shouldstop
                                        ):
                                            log.debug(
                                                "The next test item is still under the fixture package path. "
                                                "Not terminating %s",
                                                self,
                                            )
                                            return
                                    except ValueError:
                                        # Next item lives outside this package
                                        pass
                                log.debug("Finish called on %s", self)
                                try:
                                    return func(request)
                                except BaseException as exc:  # pylint: disable=broad-except
                                    pytest.fail(
                                        "Failed to run finish() on {}: {}".format(
                                            fixturedef, exc
                                        ),
                                        pytrace=True,
                                    )
                                finally:
                                    self._finished = True

                        return partial(wrapped, fixturedef)

                    fixturedef.finish = wrapper(fixturedef.finish, fixturedef)
                    try:
                        fixturedef.finish.__wrapped__
                    except AttributeError:
                        # Keep a reference to the unwrapped function and mark
                        # the fixturedef as wrapped
                        fixturedef.finish.__wrapped__ = original_func
@pytest.hookimpl(trylast=True, hookwrapper=True)
def pytest_runtest_protocol(item, nextitem):
    """
    implements the runtest_setup/call/teardown protocol for
    the given test item, including capturing exceptions and calling
    reporting hooks.
    :arg item: test item for which the runtest protocol is performed.
    :arg nextitem: the scheduled-to-be-next test item (or None if this
                   is the end my friend).  This argument is passed on to
                   :py:func:`pytest_runtest_teardown`.
    :return boolean: True if no further hook implementations should be invoked.
        Stops at first non-None result, see :ref:`firstresult`
    """
    request = item._request
    # Collect, in reverse definition order, the package-scoped fixturedefs
    # this item uses; their (wrapped) ``finish`` is driven manually below so
    # that ``nextitem`` can be passed along (see pytest_collection_modifyitems
    # which installs the wrapper).
    used_fixture_defs = []
    for fixture in item.fixturenames:
        if fixture not in item._fixtureinfo.name2fixturedefs:
            continue
        for fixturedef in reversed(item._fixtureinfo.name2fixturedefs[fixture]):
            if fixturedef.scope != "package":
                continue
            used_fixture_defs.append(fixturedef)
    try:
        # Run the test
        yield
    finally:
        # Give each package-scoped fixture a chance to finalize itself, now
        # that the next scheduled item is known.
        for fixturedef in used_fixture_defs:
            fixturedef.finish(request, nextitem=nextitem)
    del request
    del used_fixture_defs
# <---- PyTest Tweaks ------------------------------------------------------------------------------------------------
# ----- Test Setup -------------------------------------------------------------------------------------------------->
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    """
    Fixtures injection based on markers or test skips based on CLI arguments.

    Handles: pre-pytest skips for ``tests/integration/utils``, the
    ``--run-slow`` flag, the ``requires_sshd_server`` /
    ``requires_salt_modules`` / ``requires_salt_states`` markers, and the
    Windows whitelist.
    """
    integration_utils_tests_path = str(TESTS_DIR / "integration" / "utils")
    if (
        str(item.fspath).startswith(integration_utils_tests_path)
        and PRE_PYTEST_SKIP_OR_NOT is True
    ):
        item._skipped_by_mark = True
        pytest.skip(PRE_PYTEST_SKIP_REASON)
    # Tests flagged with the slowTest decorator only run when --run-slow is given
    if saltfactories.utils.compat.has_unittest_attr(item, "__slow_test__"):
        if item.config.getoption("--run-slow") is False:
            item._skipped_by_mark = True
            pytest.skip("Slow tests are disabled!")
    requires_sshd_server_marker = item.get_closest_marker("requires_sshd_server")
    if requires_sshd_server_marker is not None:
        if not item.config.getoption("--ssh-tests"):
            item._skipped_by_mark = True
            pytest.skip("SSH tests are disabled, pass '--ssh-tests' to enable them.")
        # Inject the sshd_server fixture for tests that need it
        item.fixturenames.append("sshd_server")
    requires_salt_modules_marker = item.get_closest_marker("requires_salt_modules")
    if requires_salt_modules_marker is not None:
        required_salt_modules = requires_salt_modules_marker.args
        # Accept both requires_salt_modules("a", "b") and requires_salt_modules(["a", "b"])
        if len(required_salt_modules) == 1 and isinstance(
            required_salt_modules[0], (list, tuple, set)
        ):
            required_salt_modules = required_salt_modules[0]
        required_salt_modules = set(required_salt_modules)
        not_available_modules = check_required_sminion_attributes(
            "functions", required_salt_modules
        )
        if not_available_modules:
            item._skipped_by_mark = True
            if len(not_available_modules) == 1:
                pytest.skip(
                    "Salt module '{}' is not available".format(*not_available_modules)
                )
            pytest.skip(
                "Salt modules not available: {}".format(
                    ", ".join(not_available_modules)
                )
            )
    requires_salt_states_marker = item.get_closest_marker("requires_salt_states")
    if requires_salt_states_marker is not None:
        required_salt_states = requires_salt_states_marker.args
        # Accept both requires_salt_states("a", "b") and requires_salt_states(["a", "b"])
        if len(required_salt_states) == 1 and isinstance(
            required_salt_states[0], (list, tuple, set)
        ):
            required_salt_states = required_salt_states[0]
        required_salt_states = set(required_salt_states)
        not_available_states = check_required_sminion_attributes(
            "states", required_salt_states
        )
        if not_available_states:
            item._skipped_by_mark = True
            if len(not_available_states) == 1:
                pytest.skip(
                    "Salt state module '{}' is not available".format(
                        *not_available_states
                    )
                )
            pytest.skip(
                "Salt state modules not available: {}".format(
                    ", ".join(not_available_states)
                )
            )
    if salt.utils.platform.is_windows():
        unit_tests_paths = (
            str(TESTS_DIR / "unit"),
            str(PYTESTS_DIR / "unit"),
        )
        if not str(pathlib.Path(item.fspath).resolve()).startswith(unit_tests_paths):
            # Unit tests are whitelisted on windows by default, so, we're only
            # after all other tests
            windows_whitelisted_marker = item.get_closest_marker("windows_whitelisted")
            if windows_whitelisted_marker is None:
                item._skipped_by_mark = True
                pytest.skip("Test is not whitelisted for Windows")
# <---- Test Setup ---------------------------------------------------------------------------------------------------
# ----- Test Groups Selection --------------------------------------------------------------------------------------->
def get_group_size_and_start(total_items, total_groups, group_id):
    """
    Compute the slice of one test group.

    Splits *total_items* into *total_groups* nearly-equal chunks — the first
    ``total_items % total_groups`` chunks receive one extra item — and
    returns ``(start, size)`` for the 1-indexed *group_id*.
    """
    quotient, remainder = divmod(total_items, total_groups)
    prior_groups = group_id - 1
    start = quotient * prior_groups + min(prior_groups, remainder)
    if group_id <= remainder:
        size = quotient + 1
    else:
        size = quotient
    return (start, size)


def get_group(items, total_groups, group_id):
    """
    Split *items* and return ``(selected, deselected)`` for *group_id*.

    Raises ``ValueError`` when *group_id* is outside ``1..total_groups``.
    """
    if group_id < 1 or group_id > total_groups:
        raise ValueError("Invalid test-group argument")
    start, size = get_group_size_and_start(len(items), total_groups, group_id)
    stop = start + size
    selected = items[start:stop]
    deselected = items[:start] + items[stop:]
    # Sanity check: the split must be a partition of the input
    assert len(selected) + len(deselected) == len(items)
    return selected, deselected
def groups_collection_modifyitems(config, items):
    """
    Keep only the collected items that belong to the requested test group.

    Honors the ``--test-group-count`` / ``--test-group`` CLI options; when
    either is missing the collection is left untouched.
    """
    group_count = config.getoption("test-group-count")
    group_id = config.getoption("test-group")
    if not group_count or not group_id:
        # We're not selecting tests using groups, don't do any filtering
        return
    # NOTE(review): total_items is unused — candidate for removal
    total_items = len(items)
    tests_in_group, deselected = get_group(items, group_count, group_id)
    # Replace all items in the list
    items[:] = tests_in_group
    if deselected:
        # Notify other plugins / the report about the dropped items
        config.hook.pytest_deselected(items=deselected)
    terminal_reporter = config.pluginmanager.get_plugin("terminalreporter")
    terminal_reporter.write(
        "Running test group #{} ({} tests)\n".format(group_id, len(items)), yellow=True,
    )
# <---- Test Groups Selection ----------------------------------------------------------------------------------------
# ----- Fixtures Overrides ------------------------------------------------------------------------------------------>
@pytest.fixture(scope="session")
def salt_factories_config():
    """
    Return a dictionary with the keyword arguments for FactoriesManager
    """
    running_in_ci = os.environ.get("JENKINS_URL") or os.environ.get("CI")
    return {
        "code_dir": str(CODE_DIR),
        "inject_coverage": MAYBE_RUN_COVERAGE,
        "inject_sitecustomize": MAYBE_RUN_COVERAGE,
        # CI machines are slower — give daemons twice as long to start
        "start_timeout": 120 if running_in_ci else 60,
    }
# <---- Fixtures Overrides -------------------------------------------------------------------------------------------
# ----- Salt Factories ---------------------------------------------------------------------------------------------->
@pytest.fixture(scope="session")
def integration_files_dir(salt_factories):
    """
    Fixture which returns the salt integration files directory path.
    Creates the directory if it does not yet exist, and populates it with
    the checked-in integration support files.
    """
    target = salt_factories.root_dir / "integration-files"
    target.mkdir(exist_ok=True)
    source_dir = PYTESTS_DIR / "integration" / "files"
    for entry in source_dir.iterdir():
        destination = target / entry.name
        if entry.is_dir():
            shutil.copytree(str(entry), str(destination))
        else:
            shutil.copyfile(str(entry), str(destination))
    return target
@pytest.fixture(scope="session")
def state_tree_root_dir(integration_files_dir):
    """
    Path of the salt state tree root directory, created on demand.
    """
    path = integration_files_dir / "state-tree"
    path.mkdir(exist_ok=True)
    return path
@pytest.fixture(scope="session")
def pillar_tree_root_dir(integration_files_dir):
    """
    Path of the salt pillar tree root directory, created on demand.
    """
    path = integration_files_dir / "pillar-tree"
    path.mkdir(exist_ok=True)
    return path
@pytest.fixture(scope="session")
def base_env_state_tree_root_dir(state_tree_root_dir):
    """
    Path of the base environment state tree directory, created on demand.

    Also publishes the location through RUNTIME_VARS for the unittest-based
    part of the suite.
    """
    path = state_tree_root_dir / "base"
    path.mkdir(exist_ok=True)
    RUNTIME_VARS.TMP_STATE_TREE = str(path.resolve())
    RUNTIME_VARS.TMP_BASEENV_STATE_TREE = RUNTIME_VARS.TMP_STATE_TREE
    return path
@pytest.fixture(scope="session")
def prod_env_state_tree_root_dir(state_tree_root_dir):
    """
    Path of the prod environment state tree directory, created on demand.

    Also publishes the location through RUNTIME_VARS for the unittest-based
    part of the suite.
    """
    path = state_tree_root_dir / "prod"
    path.mkdir(exist_ok=True)
    RUNTIME_VARS.TMP_PRODENV_STATE_TREE = str(path.resolve())
    return path
@pytest.fixture(scope="session")
def base_env_pillar_tree_root_dir(pillar_tree_root_dir):
    """
    Path of the base environment pillar tree directory, created on demand.

    Also publishes the location through RUNTIME_VARS for the unittest-based
    part of the suite.
    """
    path = pillar_tree_root_dir / "base"
    path.mkdir(exist_ok=True)
    RUNTIME_VARS.TMP_PILLAR_TREE = str(path.resolve())
    RUNTIME_VARS.TMP_BASEENV_PILLAR_TREE = RUNTIME_VARS.TMP_PILLAR_TREE
    return path
@pytest.fixture(scope="session")
def prod_env_pillar_tree_root_dir(pillar_tree_root_dir):
    """
    Path of the prod environment pillar tree directory, created on demand.

    Also publishes the location through RUNTIME_VARS for the unittest-based
    part of the suite.
    """
    path = pillar_tree_root_dir / "prod"
    path.mkdir(exist_ok=True)
    RUNTIME_VARS.TMP_PRODENV_PILLAR_TREE = str(path.resolve())
    return path
@pytest.fixture(scope="session")
def salt_syndic_master_factory(
    request,
    salt_factories,
    base_env_state_tree_root_dir,
    base_env_pillar_tree_root_dir,
    prod_env_state_tree_root_dir,
    prod_env_pillar_tree_root_dir,
):
    """
    Session-wide factory for the syndic's upstream master daemon.

    Builds the master configuration from the checked-in ``syndic_master``
    config file, pointing file/pillar roots at the pytest-managed trees,
    and returns the (not yet started) daemon factory.
    """
    root_dir = salt_factories.get_root_dir_for_daemon("syndic_master")
    conf_dir = root_dir / "conf"
    conf_dir.mkdir(exist_ok=True)
    with salt.utils.files.fopen(
        os.path.join(RUNTIME_VARS.CONF_DIR, "syndic_master")
    ) as rfh:
        config_defaults = yaml.deserialize(rfh.read())
    # Start salt-ssh runs with an empty known-hosts file
    tests_known_hosts_file = str(root_dir / "salt_ssh_known_hosts")
    with salt.utils.files.fopen(tests_known_hosts_file, "w") as known_hosts:
        known_hosts.write("")
    config_defaults["root_dir"] = str(root_dir)
    config_defaults["known_hosts_file"] = tests_known_hosts_file
    config_defaults["syndic_master"] = "localhost"
    config_defaults["transport"] = request.config.getoption("--transport")
    config_overrides = {}
    ext_pillar = []
    # cmd_yaml external pillar; "type" is the Windows analogue of "cat"
    if salt.utils.platform.is_windows():
        ext_pillar.append(
            {"cmd_yaml": "type {}".format(os.path.join(RUNTIME_VARS.FILES, "ext.yaml"))}
        )
    else:
        ext_pillar.append(
            {"cmd_yaml": "cat {}".format(os.path.join(RUNTIME_VARS.FILES, "ext.yaml"))}
        )
    # We need to copy the extension modules into the new master root_dir or
    # it will be prefixed by it
    extension_modules_path = str(root_dir / "extension_modules")
    if not os.path.exists(extension_modules_path):
        shutil.copytree(
            os.path.join(RUNTIME_VARS.FILES, "extension_modules"),
            extension_modules_path,
        )
    # Copy the autosign_file to the new master root_dir
    autosign_file_path = str(root_dir / "autosign_file")
    shutil.copyfile(
        os.path.join(RUNTIME_VARS.FILES, "autosign_file"), autosign_file_path
    )
    # all read, only owner write
    autosign_file_permissions = (
        stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
    )
    os.chmod(autosign_file_path, autosign_file_permissions)
    config_overrides.update(
        {
            "ext_pillar": ext_pillar,
            "extension_modules": extension_modules_path,
            "file_roots": {
                "base": [
                    str(base_env_state_tree_root_dir),
                    os.path.join(RUNTIME_VARS.FILES, "file", "base"),
                ],
                # Alternate root to test __env__ choices
                "prod": [
                    str(prod_env_state_tree_root_dir),
                    os.path.join(RUNTIME_VARS.FILES, "file", "prod"),
                ],
            },
            "pillar_roots": {
                "base": [
                    str(base_env_pillar_tree_root_dir),
                    os.path.join(RUNTIME_VARS.FILES, "pillar", "base"),
                ],
                "prod": [str(prod_env_pillar_tree_root_dir)],
            },
        }
    )
    factory = salt_factories.get_salt_master_daemon(
        "syndic_master",
        order_masters=True,
        config_defaults=config_defaults,
        config_overrides=config_overrides,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    return factory
@pytest.fixture(scope="session")
def salt_syndic_factory(salt_factories, salt_syndic_master_factory):
    """
    Session-wide salt-syndic daemon factory attached to the syndic master.
    """
    conf_path = os.path.join(RUNTIME_VARS.CONF_DIR, "syndic")
    with salt.utils.files.fopen(conf_path) as rfh:
        syndic_opts = yaml.deserialize(rfh.read())
    syndic_opts["hosts.file"] = os.path.join(RUNTIME_VARS.TMP, "hosts")
    syndic_opts["aliases.file"] = os.path.join(RUNTIME_VARS.TMP, "aliases")
    # The syndic must talk the same transport as its master
    syndic_opts["transport"] = salt_syndic_master_factory.config["transport"]
    config_defaults = {"master": None, "minion": None, "syndic": syndic_opts}
    return salt_syndic_master_factory.get_salt_syndic_daemon(
        "syndic",
        config_defaults=config_defaults,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
@pytest.fixture(scope="session")
def salt_master_factory(
    salt_factories,
    salt_syndic_master_factory,
    base_env_state_tree_root_dir,
    base_env_pillar_tree_root_dir,
    prod_env_state_tree_root_dir,
    prod_env_pillar_tree_root_dir,
):
    """
    Session-wide factory for the main master daemon (a sub-master of the
    syndic master).

    Builds the master configuration from the checked-in ``master`` config
    file, wires up ext_pillar/reactor/file and pillar roots, and copies the
    cloud test configuration into the master's config directory.
    """
    root_dir = salt_factories.get_root_dir_for_daemon("master")
    conf_dir = root_dir / "conf"
    conf_dir.mkdir(exist_ok=True)
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.CONF_DIR, "master")) as rfh:
        config_defaults = yaml.deserialize(rfh.read())
    # Start salt-ssh runs with an empty known-hosts file
    tests_known_hosts_file = str(root_dir / "salt_ssh_known_hosts")
    with salt.utils.files.fopen(tests_known_hosts_file, "w") as known_hosts:
        known_hosts.write("")
    config_defaults["root_dir"] = str(root_dir)
    config_defaults["known_hosts_file"] = tests_known_hosts_file
    config_defaults["syndic_master"] = "localhost"
    config_defaults["transport"] = salt_syndic_master_factory.config["transport"]
    # Reactor used by the reactor integration tests
    config_defaults["reactor"] = [
        {"salt/test/reactor": [os.path.join(RUNTIME_VARS.FILES, "reactor-test.sls")]}
    ]
    config_overrides = {}
    ext_pillar = []
    # cmd_yaml external pillar; "type" is the Windows analogue of "cat"
    if salt.utils.platform.is_windows():
        ext_pillar.append(
            {"cmd_yaml": "type {}".format(os.path.join(RUNTIME_VARS.FILES, "ext.yaml"))}
        )
    else:
        ext_pillar.append(
            {"cmd_yaml": "cat {}".format(os.path.join(RUNTIME_VARS.FILES, "ext.yaml"))}
        )
    ext_pillar.append(
        {
            "file_tree": {
                "root_dir": os.path.join(RUNTIME_VARS.PILLAR_DIR, "base", "file_tree"),
                "follow_dir_links": False,
                "keep_newline": True,
            }
        }
    )
    config_overrides["pillar_opts"] = True
    # We need to copy the extension modules into the new master root_dir or
    # it will be prefixed by it
    extension_modules_path = str(root_dir / "extension_modules")
    if not os.path.exists(extension_modules_path):
        shutil.copytree(
            os.path.join(RUNTIME_VARS.FILES, "extension_modules"),
            extension_modules_path,
        )
    # Copy the autosign_file to the new master root_dir
    autosign_file_path = str(root_dir / "autosign_file")
    shutil.copyfile(
        os.path.join(RUNTIME_VARS.FILES, "autosign_file"), autosign_file_path
    )
    # all read, only owner write
    autosign_file_permissions = (
        stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
    )
    os.chmod(autosign_file_path, autosign_file_permissions)
    config_overrides.update(
        {
            "ext_pillar": ext_pillar,
            "extension_modules": extension_modules_path,
            "file_roots": {
                "base": [
                    str(base_env_state_tree_root_dir),
                    os.path.join(RUNTIME_VARS.FILES, "file", "base"),
                ],
                # Alternate root to test __env__ choices
                "prod": [
                    str(prod_env_state_tree_root_dir),
                    os.path.join(RUNTIME_VARS.FILES, "file", "prod"),
                ],
            },
            "pillar_roots": {
                "base": [
                    str(base_env_pillar_tree_root_dir),
                    os.path.join(RUNTIME_VARS.FILES, "pillar", "base"),
                ],
                "prod": [str(prod_env_pillar_tree_root_dir)],
            },
        }
    )
    # Let's copy over the test cloud config files and directories into the running master config directory
    for entry in os.listdir(RUNTIME_VARS.CONF_DIR):
        if not entry.startswith("cloud"):
            continue
        source = os.path.join(RUNTIME_VARS.CONF_DIR, entry)
        dest = str(conf_dir / entry)
        if os.path.isdir(source):
            shutil.copytree(source, dest)
        else:
            shutil.copyfile(source, dest)
    factory = salt_syndic_master_factory.get_salt_master_daemon(
        "master",
        config_defaults=config_defaults,
        config_overrides=config_overrides,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    return factory
@pytest.fixture(scope="session")
def salt_minion_factory(salt_master_factory):
    """
    Session-wide salt-minion daemon factory bound to the main master.
    """
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.CONF_DIR, "minion")) as rfh:
        defaults = yaml.deserialize(rfh.read())
    defaults["hosts.file"] = os.path.join(RUNTIME_VARS.TMP, "hosts")
    defaults["aliases.file"] = os.path.join(RUNTIME_VARS.TMP, "aliases")
    defaults["transport"] = salt_master_factory.config["transport"]
    # Reuse the master's file/pillar roots so states resolve identically
    overrides = {
        "file_roots": salt_master_factory.config["file_roots"].copy(),
        "pillar_roots": salt_master_factory.config["pillar_roots"].copy(),
    }
    venv_bin = get_virtualenv_binary_path()
    if venv_bin:
        overrides["venv_bin"] = venv_bin
    minion_factory = salt_master_factory.get_salt_minion_daemon(
        "minion",
        config_defaults=defaults,
        config_overrides=overrides,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    # Drop the stale key from the master once the daemon terminates
    minion_factory.register_after_terminate_callback(
        pytest.helpers.remove_stale_minion_key, salt_master_factory, minion_factory.id
    )
    return minion_factory
@pytest.fixture(scope="session")
def salt_sub_minion_factory(salt_master_factory):
    """
    Session-wide secondary salt-minion daemon factory bound to the main master.
    """
    with salt.utils.files.fopen(
        os.path.join(RUNTIME_VARS.CONF_DIR, "sub_minion")
    ) as rfh:
        defaults = yaml.deserialize(rfh.read())
    defaults["hosts.file"] = os.path.join(RUNTIME_VARS.TMP, "hosts")
    defaults["aliases.file"] = os.path.join(RUNTIME_VARS.TMP, "aliases")
    defaults["transport"] = salt_master_factory.config["transport"]
    # Reuse the master's file/pillar roots so states resolve identically
    overrides = {
        "file_roots": salt_master_factory.config["file_roots"].copy(),
        "pillar_roots": salt_master_factory.config["pillar_roots"].copy(),
    }
    venv_bin = get_virtualenv_binary_path()
    if venv_bin:
        overrides["venv_bin"] = venv_bin
    sub_minion_factory = salt_master_factory.get_salt_minion_daemon(
        "sub_minion",
        config_defaults=defaults,
        config_overrides=overrides,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    # Drop the stale key from the master once the daemon terminates
    sub_minion_factory.register_after_terminate_callback(
        pytest.helpers.remove_stale_minion_key,
        salt_master_factory,
        sub_minion_factory.id,
    )
    return sub_minion_factory
@pytest.fixture(scope="session")
def salt_proxy_factory(salt_factories, salt_master_factory):
    """
    Session-wide salt proxy-minion daemon factory (id ``proxytest``).
    """
    proxy_minion_id = "proxytest"
    daemon_root = salt_factories.get_root_dir_for_daemon(proxy_minion_id)
    config_dir = daemon_root / "conf"
    config_dir.mkdir(parents=True, exist_ok=True)
    # Expose the proxy conf dir to the unittest-based part of the suite
    RUNTIME_VARS.TMP_PROXY_CONF_DIR = str(config_dir)
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.CONF_DIR, "proxy")) as rfh:
        defaults = yaml.deserialize(rfh.read())
    defaults.update(
        {
            "root_dir": str(daemon_root),
            "hosts.file": os.path.join(RUNTIME_VARS.TMP, "hosts"),
            "aliases.file": os.path.join(RUNTIME_VARS.TMP, "aliases"),
            "transport": salt_master_factory.config["transport"],
        }
    )
    proxy_factory = salt_master_factory.get_salt_proxy_minion_daemon(
        proxy_minion_id,
        config_defaults=defaults,
        extra_cli_arguments_after_first_start_failure=["--log-level=debug"],
    )
    # Drop the stale key from the master once the daemon terminates
    proxy_factory.register_after_terminate_callback(
        pytest.helpers.remove_stale_minion_key, salt_master_factory, proxy_factory.id
    )
    return proxy_factory
@pytest.fixture(scope="session")
def salt_cli(salt_master_factory):
    """Session-wide ``salt`` CLI wrapper bound to the main master."""
    return salt_master_factory.get_salt_cli()
@pytest.fixture(scope="session")
def salt_cp_cli(salt_master_factory):
    """Session-wide ``salt-cp`` CLI wrapper bound to the main master."""
    return salt_master_factory.get_salt_cp_cli()
@pytest.fixture(scope="session")
def salt_key_cli(salt_master_factory):
    """Session-wide ``salt-key`` CLI wrapper bound to the main master."""
    return salt_master_factory.get_salt_key_cli()
@pytest.fixture(scope="session")
def salt_run_cli(salt_master_factory):
    """Session-wide ``salt-run`` CLI wrapper bound to the main master."""
    return salt_master_factory.get_salt_run_cli()
@pytest.fixture(scope="session")
def salt_ssh_cli(salt_master_factory):
    """Session-wide ``salt-ssh`` CLI wrapper bound to the main master."""
    return salt_master_factory.get_salt_ssh_cli()
@pytest.fixture(scope="session")
def salt_call_cli(salt_minion_factory):
    """Session-wide ``salt-call`` CLI wrapper bound to the main minion."""
    return salt_minion_factory.get_salt_call_cli()
@pytest.fixture(scope="session", autouse=True)
def bridge_pytest_and_runtests(
    reap_stray_processes,
    salt_factories,
    salt_syndic_master_factory,
    salt_syndic_factory,
    salt_master_factory,
    salt_minion_factory,
    salt_sub_minion_factory,
    sshd_config_dir,
):
    """
    Autouse session fixture that copies the pytest-generated daemon
    configurations and directory paths into RUNTIME_VARS, so the
    unittest-based part of the suite runs against the same environment.
    """
    # Make sure unittest2 uses the pytest generated configuration
    RUNTIME_VARS.RUNTIME_CONFIGS["master"] = freeze(salt_master_factory.config)
    RUNTIME_VARS.RUNTIME_CONFIGS["minion"] = freeze(salt_minion_factory.config)
    RUNTIME_VARS.RUNTIME_CONFIGS["sub_minion"] = freeze(salt_sub_minion_factory.config)
    RUNTIME_VARS.RUNTIME_CONFIGS["syndic_master"] = freeze(
        salt_syndic_master_factory.config
    )
    RUNTIME_VARS.RUNTIME_CONFIGS["syndic"] = freeze(salt_syndic_factory.config)
    RUNTIME_VARS.RUNTIME_CONFIGS["client_config"] = freeze(
        salt.config.client_config(salt_master_factory.config["conf_file"])
    )
    # Make sure unittest2 classes know their paths
    RUNTIME_VARS.TMP_ROOT_DIR = str(salt_factories.root_dir.resolve())
    RUNTIME_VARS.TMP_CONF_DIR = os.path.dirname(salt_master_factory.config["conf_file"])
    RUNTIME_VARS.TMP_MINION_CONF_DIR = os.path.dirname(
        salt_minion_factory.config["conf_file"]
    )
    RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR = os.path.dirname(
        salt_sub_minion_factory.config["conf_file"]
    )
    RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR = os.path.dirname(
        salt_syndic_master_factory.config["conf_file"]
    )
    RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR = os.path.dirname(
        salt_syndic_factory.config["conf_file"]
    )
    RUNTIME_VARS.TMP_SSH_CONF_DIR = str(sshd_config_dir)
@pytest.fixture(scope="session")
def sshd_config_dir(salt_factories):
    """
    Temporary configuration directory for the test SSH server.

    Removed (best effort) when the session ends.
    """
    path = salt_factories.get_root_dir_for_daemon("sshd")
    yield path
    shutil.rmtree(str(path), ignore_errors=True)
@pytest.fixture(scope="module")
def sshd_server(salt_factories, sshd_config_dir, salt_master):
    """
    Module-scoped sshd daemon plus a matching salt-ssh roster file.

    Yields the running sshd factory; the roster file is removed on
    teardown.
    """
    # Key/value pairs rendered into the temporary sshd_config
    sshd_config_dict = {
        "Protocol": "2",
        # Turn strict modes off so that we can operate in /tmp
        "StrictModes": "no",
        # Logging
        "SyslogFacility": "AUTH",
        "LogLevel": "INFO",
        # Authentication:
        "LoginGraceTime": "120",
        "PermitRootLogin": "without-password",
        "PubkeyAuthentication": "yes",
        # Don't read the user's ~/.rhosts and ~/.shosts files
        "IgnoreRhosts": "yes",
        "HostbasedAuthentication": "no",
        # To enable empty passwords, change to yes (NOT RECOMMENDED)
        "PermitEmptyPasswords": "no",
        # Change to yes to enable challenge-response passwords (beware issues with
        # some PAM modules and threads)
        "ChallengeResponseAuthentication": "no",
        # Change to no to disable tunnelled clear text passwords
        "PasswordAuthentication": "no",
        "X11Forwarding": "no",
        "X11DisplayOffset": "10",
        "PrintMotd": "no",
        "PrintLastLog": "yes",
        "TCPKeepAlive": "yes",
        "AcceptEnv": "LANG LC_*",
        "Subsystem": "sftp /usr/lib/openssh/sftp-server",
        "UsePAM": "yes",
    }
    factory = salt_factories.get_sshd_daemon(
        sshd_config_dict=sshd_config_dict, config_dir=sshd_config_dir,
    )
    # We also need a salt-ssh roster config file
    roster_path = pathlib.Path(salt_master.config_dir) / "roster"
    roster_contents = textwrap.dedent(
        """\
        localhost:
          host: 127.0.0.1
          port: {}
          user: {}
          mine_functions:
            test.arg: ['itworked']
        """.format(
            factory.listen_port, RUNTIME_VARS.RUNNING_TESTS_USER
        )
    )
    if salt.utils.platform.is_darwin():
        # /usr/local/bin is not on the default PATH on macOS services
        roster_contents += "  set_path: $PATH:/usr/local/bin/\n"
    log.debug(
        "Writing to configuration file %s. Configuration:\n%s",
        roster_path,
        roster_contents,
    )
    with salt.utils.files.fopen(str(roster_path), "w") as wfh:
        wfh.write(roster_contents)
    with factory.started():
        yield factory
    # Teardown: drop the roster file written above
    if roster_path.exists():
        roster_path.unlink()
# <---- Salt Factories -----------------------------------------------------------------------------------------------
# ----- From Filenames Test Selection ------------------------------------------------------------------------------->
def _match_to_test_file(match):
    """
    Translate a dotted ``filename_map.yml`` entry into a test module path
    relative to the repository root.
    """
    *dirs, module = match.split(".")
    return TESTS_DIR.joinpath(*dirs, module + ".py").relative_to(CODE_DIR)
def from_filenames_collection_modifyitems(config, items):
    """
    Deselect collected tests unrelated to the files named in
    ``--from-filenames``.

    The option takes a comma-separated list of paths; absolute paths are
    treated as files containing a line-separated list of paths to consider.
    Matching uses ``tests/filename_map.yml`` rules (regex, glob and direct
    path rules) plus direct ``salt/`` -> ``test_<name>.py`` lookups.
    """
    from_filenames = config.getoption("--from-filenames")
    if not from_filenames:
        # Don't do anything
        return
    test_categories_paths = (
        (TESTS_DIR / "integration").relative_to(CODE_DIR),
        (TESTS_DIR / "multimaster").relative_to(CODE_DIR),
        (TESTS_DIR / "unit").relative_to(CODE_DIR),
        (PYTESTS_DIR / "e2e").relative_to(CODE_DIR),
        (PYTESTS_DIR / "functional").relative_to(CODE_DIR),
        (PYTESTS_DIR / "integration").relative_to(CODE_DIR),
        (PYTESTS_DIR / "unit").relative_to(CODE_DIR),
    )
    test_module_paths = set()
    from_filenames_listing = set()
    for path in [pathlib.Path(path.strip()) for path in from_filenames.split(",")]:
        if path.is_absolute():
            # In this case, this path is considered to be a file containing a line separated list
            # of files to consider
            with salt.utils.files.fopen(str(path)) as rfh:
                for line in rfh:
                    line_path = pathlib.Path(line.strip())
                    if not line_path.exists():
                        continue
                    from_filenames_listing.add(line_path)
            continue
        from_filenames_listing.add(path)
    filename_map = yaml.deserialize((TESTS_DIR / "filename_map.yml").read_text())
    # Let's add the match all rule
    for rule, matches in filename_map.items():
        if rule == "*":
            for match in matches:
                test_module_paths.add(_match_to_test_file(match))
            break
    # Let's now go through the list of files gathered
    for filename in from_filenames_listing:
        if str(filename).startswith("tests/"):
            # Tests in the listing don't require additional matching and will be added to the
            # list of tests to run
            test_module_paths.add(filename)
            continue
        if filename.name == "setup.py" or str(filename).startswith("salt/"):
            # BUGFIX: this branch previously referenced ``path`` — the stale
            # loop variable left over from the listing loop above — instead
            # of ``filename``, so direct matching never worked correctly.
            if filename.name == "__init__.py":
                # No direct matching
                continue
            # Now let's try a direct match between the passed file and possible test modules
            for test_categories_path in test_categories_paths:
                test_module_path = test_categories_path / "test_{}".format(
                    filename.name
                )
                if test_module_path.is_file():
                    test_module_paths.add(test_module_path)
                    continue
            # Do we have an entry in tests/filename_map.yml
            for rule, matches in filename_map.items():
                if rule == "*":
                    continue
                elif "|" in rule:
                    # This is regex
                    if re.match(rule, str(filename)):
                        for match in matches:
                            test_module_paths.add(_match_to_test_file(match))
                elif "*" in rule or "\\" in rule:
                    # Glob matching
                    for filerule in CODE_DIR.glob(rule):
                        if not filerule.exists():
                            continue
                        filerule = filerule.relative_to(CODE_DIR)
                        if filerule != filename:
                            continue
                        for match in matches:
                            test_module_paths.add(_match_to_test_file(match))
                else:
                    if str(filename) != rule:
                        continue
                    # Direct file paths as rules
                    filerule = pathlib.Path(rule)
                    if not filerule.exists():
                        continue
                    for match in matches:
                        test_module_paths.add(_match_to_test_file(match))
            continue
        else:
            log.debug("Don't know what to do with path %s", filename)
    # Partition the collected items into selected/deselected
    selected = []
    deselected = []
    for item in items:
        itempath = pathlib.Path(str(item.fspath)).resolve().relative_to(CODE_DIR)
        if itempath in test_module_paths:
            selected.append(item)
        else:
            deselected.append(item)
    items[:] = selected
    if deselected:
        config.hook.pytest_deselected(items=deselected)
# <---- From Filenames Test Selection --------------------------------------------------------------------------------
# ----- Custom Fixtures --------------------------------------------------------------------------------------------->
@pytest.fixture(scope="session")
def reap_stray_processes():
    """
    Session fixture whose teardown reaps any child processes the test
    suite left behind: waits briefly, then SIGKILLs survivors.
    """
    # Run tests
    yield
    children = psutil.Process(os.getpid()).children(recursive=True)
    if not children:
        log.info("No astray processes found")
        return
    def on_terminate(proc):
        # Callback invoked by psutil.wait_procs for each reaped process
        log.debug("Process %s terminated with exit code %s", proc, proc.returncode)
    if children:
        # Reverse the order, siblings first, parents after
        children.reverse()
        log.warning(
            "Test suite left %d astray processes running. Killing those processes:\n%s",
            len(children),
            pprint.pformat(children),
        )
        # NOTE(review): the children are never sent SIGTERM before this first
        # wait — they are only killed below; confirm that is the intent.
        _, alive = psutil.wait_procs(children, timeout=3, callback=on_terminate)
        for child in alive:
            try:
                child.kill()
            except psutil.NoSuchProcess:
                # Already gone between the wait and the kill
                continue
        _, alive = psutil.wait_procs(alive, timeout=3, callback=on_terminate)
        if alive:
            # Give up
            for child in alive:
                log.warning(
                    "Process %s survived SIGKILL, giving up:\n%s",
                    child,
                    pprint.pformat(child.as_dict()),
                )
@pytest.fixture(scope="session")
def sminion():
    """Return a single salt minion instance shared by the whole test session."""
    return create_sminion()
@pytest.fixture(scope="session")
def grains(sminion):
    """Return a copy of the session minion's grains mapping.

    A copy is returned so tests can mutate it freely without leaking
    changes into the shared session minion.
    """
    minion_grains = sminion.opts["grains"]
    return minion_grains.copy()
# BUG FIX: `scope="module"` used to be a *function parameter* of the fixture,
# which pytest simply treated as a defaulted argument -- the fixture therefore
# ran with the default function scope.  The scope belongs on the decorator.
@pytest.fixture(scope="module")
def ssl_webserver(integration_files_dir):
    """
    Spin up an HTTPS webserver serving ``integration_files_dir``.

    Yields the running :class:`Webserver` instance and stops it on teardown.
    Skips on interpreters too old to provide ``ssl.PROTOCOL_TLS``.
    """
    if sys.version_info < (3, 5, 3):
        pytest.skip("Python versions older than 3.5.3 do not define `ssl.PROTOCOL_TLS`")

    context = ssl.SSLContext(ssl.PROTOCOL_TLS)
    context.load_cert_chain(
        str(integration_files_dir / "https" / "cert.pem"),
        str(integration_files_dir / "https" / "key.pem"),
    )

    webserver = Webserver(root=str(integration_files_dir), ssl_opts=context)
    webserver.start()
    yield webserver
    webserver.stop()
# <---- Custom Fixtures ----------------------------------------------------------------------------------------------
# Known issues for the *integration* test suite, consumed by get_list().
# Outer keys: 'ignore_list' (skipped at collection) and 'xfail_list' (marked
# xfail).  Inner keys are platform/flavor selectors such as 'common',
# '<distro>', '<distro>/<flavor>' or a salt version.
KNOWN_ISSUES_INTEGRATION = {
    'ignore_list': {
        'common': [
            'tests/integration/externalapi/test_venafiapi.py',
            'test_state.py::OrchEventTest::test_parallel_orchestrations',
            'test_state.py::StateModuleTest::test_requisites_onfail_any',
            'files/file/base/*',  # should not be included
            'utils/test_reactor.py',  # not yet implemented
            '*::SaltnadoTestCase::*',  # these are not actual tests
            'cloud/providers/msazure.py',
            'modules/git.py',
            'cloud/helpers/virtualbox.py',
            'utils/*',
            # Running the following tests causes unsuccessful closing
            # of forked processes. This will cause "hanging" jenkins jobs.
            'states/supervisord.py',
            '*::MasterTest::test_exit_status_correct_usage',
            '*::ProxyTest::test_exit_status_correct_usage',
            '*::FileTest::test_issue_2227_file_append',
            '*::FileTest::test_issue_8947_utf8_sls',
            # Evil test
            'reactor/reactor.py',  # This test causes "py.test" to never finish
            # 'runners/fileserver.py::FileserverTest::test_clear_file_list_cache',  # this test hangs
            'runners/fileserver.py',  # workaround for comment above
            # 'wheel/key.py::KeyWheelModuleTest::test_list_all',  # ERROR at teardown
            '*/wheel/key.py',  # workaround for comment above
            '*/wheel/client.py',
            '*/virtualenv.py',
            '*/states/user.py',
            '*states/svn.py',
            '*/kitchen/tests/wordpress/*',
            'pillar/test_git_pillar.py',
            # We are not interested in the NetapiClientTests
            '*/netapi/test_client.py',
            # This makes a request to github.com
            '*/modules/ssh.py',
            # CRON is not installed on toaster images and cron tests are not designed for SUSE.
            '*/states/test_cron.py',
            # NEED INVESTIGATION
            '*rest_tornado/test_app.py::TestSaltAPIHandler::test_multi_local_async_post',
            '*rest_tornado/test_app.py::TestSaltAPIHandler::test_multi_local_async_post_multitoken',
            '*rest_tornado/test_app.py::TestSaltAPIHandler::test_simple_local_async_post',
            '*rest_tornado/test_app.py::TestSaltAPIHandler::test_simple_local_runner_post',
            '*/test_state.py::StateModuleTest::test_onchanges_in_requisite',
            '*/test_state.py::StateModuleTest::test_onchanges_requisite',
            '*/test_state.py::StateModuleTest::test_onchanges_requisite_multiple',
            '*/test_state.py::StateModuleTest::test_requisites_onchanges_any',
            '*/runners/test_state.py::StateRunnerTest::test_orchestrate_retcode',
            '*/shell/test_call.py::CallTest::test_issue_14979_output_file_permissions',
            '*/shell/test_call.py::CallTest::test_issue_15074_output_file_append',
            '*/shell/test_call.py::CallTest::test_issue_2731_masterless',
            '*/modules/ssh.py',
            '*/proxy/test_shell.py',  # proxy minion is not starting
            # After switch to M2Crypto
            'cloud/clouds/test_digitalocean.py',  # ModuleNotFoundError: No module named 'Crypto'
        ],
        'rhel6': [
            # Avoid error due:
            # [Errno 1] _ssl.c:492: error:1409442E:SSL routines:SSL3_READ_BYTES:tlsv1 alert protocol version
            '*/modules/gem.py',
        ],
        # disable 2017.7.1 on python 2.6
        'rhel6/products-next': ['*'],
        'sles11sp3/products-next': ['*'],
        'sles11sp4/products-next': ['*'],
        'sles11sp3': ['*/modules/gem.py', '*/modules/ssh.py'],
        'sles11sp4': ['*/modules/gem.py', '*/modules/ssh.py'],
    },
    'xfail_list': {
        'common': [
            # Always failing
            '*sysmod.py::SysModuleTest::test_valid_docs',
            'cloud/providers/virtualbox.py::BaseVirtualboxTests::test_get_manager',
            'modules/timezone.py::TimezoneLinuxModuleTest::test_get_hwclock',
            'states/git.py::GitTest::test_latest_changed_local_branch_rev_develop',
            'states/git.py::GitTest::test_latest_changed_local_branch_rev_head',
            'states/git.py::GitTest::test_latest_fast_forward',
            'states/git.py::LocalRepoGitTest::test_renamed_default_branch',
            'loader/ext_grains.py::LoaderGrainsTest::test_grains_overwrite',
            'loader/ext_modules.py::LoaderOverridesTest::test_overridden_internal',
            'modules/decorators.py::DecoratorTest::test_depends',
            'modules/decorators.py::DecoratorTest::test_depends_will_not_fallback',
            'modules/decorators.py::DecoratorTest::test_missing_depends_will_fallback',
            # Sometimes failing in jenkins.
            'shell/call.py::CallTest::test_issue_14979_output_file_permissions',
            'shell/call.py::CallTest::test_issue_15074_output_file_append',
            'shell/call.py::CallTest::test_issue_2731_masterless',
            'shell/matcher.py::MatchTest::test_grain',
            'netapi/rest_tornado/test_app.py::TestSaltAPIHandler::test_simple_local_post_only_dictionary_request',
            'shell/master_tops.py::MasterTopsTest::test_custom_tops_gets_utilized',
            'states/svn.py::SvnTest::test_latest',  # sles12sp1
            'states/svn.py::SvnTest::test_latest_empty_dir',  # sles12sp1
            'runners/state.py::StateRunnerTest::test_orchestrate_output',  # sles12sp1 rhel7
            'modules/test_saltutil.py::SaltUtilSyncPillarTest::test_pillar_refresh',  # sles12sp2
            '*::test_issue_7754',
            '*test_fileserver.py::FileserverTest::test_symlink_list',
            '*test_fileserver.py::FileserverTest::test_empty_dir_list',
            '*test_timezone.py::TimezoneLinuxModuleTest::test_get_hwclock',
            '*test_file.py::FileTest::test_managed_check_cmd',
            'modules/test_network.py::NetworkTest::test_network_ping',  # Bad test implementation
            # Needs investigation. Setting them to xfail to have a "new green start" on March 15th
            # see https://github.com/SUSE/spacewalk/issues/14284
            'states/test_match.py::StateMatchTest::test_issue_2167_ipcidr_no_AttributeError',
            'states/test_file.py::FileTest::test_directory_broken_symlink',
            'shell/test_matcher.py::MatchTest::test_ipcidr',
            'netapi/rest_cherrypy/test_app.py::TestJobs::test_all_jobs',
            'netapi/rest_cherrypy/test_app.py::TestAuth::test_webhook_auth',
            'modules/test_saltutil.py::SaltUtilModuleTest::test_wheel_just_function',
            'modules/test_network.py::NetworkTest::test_network_netstat',
            'modules/test_cp.py::CPModuleTest::test_get_dir_templated_paths',
            'modules/test_cmdmod.py::CMDModuleTest::test_script_retcode',
            'modules/test_cmdmod.py::CMDModuleTest::test_script_cwd_with_space',
            'modules/test_cmdmod.py::CMDModuleTest::test_script_cwd',
            'modules/test_cmdmod.py::CMDModuleTest::test_script',
            'modules/test_cmdmod.py::CMDModuleTest::test_has_exec',
            'modules/test_cmdmod.py::CMDModuleTest::test_exec_code_with_single_arg',
            'modules/test_cmdmod.py::CMDModuleTest::test_exec_code_with_multiple_args',
            'modules/test_cmdmod.py::CMDModuleTest::test_exec_code',
            # Failing in 3003.3
            'modules/saltutil/test_wheel.py::test_wheel_just_function',
            'modules/test_pip.py::PipModuleTest::test_pip_install_multiple_editables',
            'states/test_pip_state.py::PipStateTest::test_issue_2028_pip_installed_state',
            'cli/test_matcher.py::test_ipcidr',
        ],
        'rhel6': [
            'cloud/providers/virtualbox.py::CreationDestructionVirtualboxTests::test_vm_creation_and_destruction',
            'cloud/providers/virtualbox.py::CloneVirtualboxTests::test_create_machine',
            'cloud/providers/virtualbox.py::BootVirtualboxTests::test_start_stop',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_nonexistent_attribute_with_default',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_nonexistent_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_imachine_object_default',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_override_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_unknown_object',
            'fileserver/roots_test.py::RootsTest::test_symlink_list',
        ],
        'rhel7': [
            'states/archive.py::ArchiveTest::test_archive_extracted_skip_verify',
            'states/archive.py::ArchiveTest::test_archive_extracted_with_root_user_and_group',
            'states/archive.py::ArchiveTest::test_archive_extracted_with_source_hash',
        ],
        'sles11sp3': [
            'cloud/providers/virtualbox.py::CreationDestructionVirtualboxTests::test_vm_creation_and_destruction',
            'cloud/providers/virtualbox.py::CloneVirtualboxTests::test_create_machine',
            'cloud/providers/virtualbox.py::BootVirtualboxTests::test_start_stop',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_nonexistent_attribute_with_default',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_nonexistent_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_imachine_object_default',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_override_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_unknown_object',
            'fileserver/roots_test.py::RootsTest::test_symlink_list',
        ],
        'sles11sp4': [
            'cloud/providers/virtualbox.py::CreationDestructionVirtualboxTests::test_vm_creation_and_destruction',
            'cloud/providers/virtualbox.py::CloneVirtualboxTests::test_create_machine',
            'cloud/providers/virtualbox.py::BootVirtualboxTests::test_start_stop',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_nonexistent_attribute_with_default',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_extra_nonexistent_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_imachine_object_default',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_override_attributes',
            'cloud/providers/virtualbox.py::XpcomConversionTests::test_unknown_object',
            'shell/master.py::MasterTest::test_exit_status_correct_usage',
            'states/git.py::GitTest::test_config_set_value_with_space_character',
            'states/git.py::GitTest::test_latest',
            'states/git.py::GitTest::test_latest_changed_local_branch_rev_develop',
            'states/git.py::GitTest::test_latest_changed_local_branch_rev_head',
            'states/git.py::GitTest::test_latest_empty_dir',
            'states/git.py::GitTest::test_latest_unless_no_cwd_issue_6800',
            'states/git.py::GitTest::test_latest_with_local_changes',
            'states/git.py::GitTest::test_latest_with_rev_and_submodules',
            'states/git.py::GitTest::test_numeric_rev',
            'fileserver/roots_test.py::RootsTest::test_symlink_list',
        ],
        'sles12': [
        ],
        'sles12sp1': [
        ],
        'sles12sp2': [
        ],
        'sles12sp3': [
            'modules/test_pkg.py::PkgModuleTest::test_mod_del_repo_multiline_values',  # this test should not be executed on SUSE systems
        ],
        'sles15': [
            'modules/test_pkg.py::PkgModuleTest::test_mod_del_repo_multiline_values',  # this test should not be executed on SUSE systems
        ],
        'ubuntu1604': [
            'shell/test_enabled.py::EnabledTest::test_shell_default_enabled',  # https://github.com/saltstack/salt/issues/52898
            'shell/test_enabled.py::EnabledTest::test_template_shell',  # https://github.com/saltstack/salt/issues/52898
        ],
        'ubuntu1804': [
            'shell/test_enabled.py::EnabledTest::test_shell_default_enabled',  # https://github.com/saltstack/salt/issues/52898
            'shell/test_enabled.py::EnabledTest::test_template_shell',  # https://github.com/saltstack/salt/issues/52898
        ],
    }
}
# Known issues for the *unit* test suite; same structure as
# KNOWN_ISSUES_INTEGRATION (see get_list() for how keys are resolved).
KNOWN_ISSUES_UNIT = {
    'ignore_list': {
        'common': [
            'test_engines.py',  # Makes pytest get stuck for a long time after tests are executed
            'modules/test_boto3_elasticsearch.py',
            'zypp_plugins_test.py',  # BogusIO missing in zypp_plugin
            'netapi/rest_tornado/test_handlers.py',
            'netapi/test_rest_tornado.py',
            'returners/smtp_return_test.py',
            'transport/zeromq_test.py',  # Prevents pytest from hanging after tests
            'conf_test.py::ConfTest::test_conf_master_sample_is_commented',  # we have uncommented custom config
            'conf_test.py::ConfTest::test_conf_minion_sample_is_commented',  # we have uncommented custom config
            'conf_test.py::ConfTest::test_conf_proxy_sample_is_commented',  # we have uncommented custom config
            '*rsync_test.py::*',
            'test_module_names.py',
            'modules/darwin_sysctl_test.py',
            'states/boto_cloudwatch_event_test.py',
            'modules/boto_vpc_test.py',
            'states/boto_vpc_test.py',
            'utils/boto_test.py',
            'modules/win_ip_test.py::WinShadowTestCase::test_set_static_ip',  # takes too long to execute
            'states/blockdev_test.py::BlockdevTestCase::test_formatted',  # takes too long to execute
            'cloud/clouds/dimensiondata_test.py',
            'cloud/clouds/gce_test.py',
            '*/utils/test_parsers.py',
            '*/kitchen/tests/wordpress/*',
            'fileserver/test_gitfs.py',
            # NEEDS INVESTIGATION
            'test_pip.py::PipStateTest::test_install_requirements_parsing',
            '*/modules/test_useradd.py',
            'utils/cache_mods/cache_mod.py',
            'modules/test_boto_vpc.py',
            'states/test_boto_vpc.py',
            'states/test_augeas.py::AugeasTestCase::test_change_no_context_with_full_path_fail',
            # Not running tests for cheetah, mako and genshi templating
            'utils/test_templates.py::RenderTestCase::test_render_cheetah_evaluate',
            'utils/test_templates.py::RenderTestCase::test_render_cheetah_evaluate_text',
            'utils/test_templates.py::RenderTestCase::test_render_cheetah_evaluate_xml',
            'utils/test_templates.py::RenderTestCase::test_render_cheetah_sanity',
            'utils/test_templates.py::RenderTestCase::test_render_cheetah_variable',
            'utils/test_templates.py::RenderTestCase::test_render_genshi_evaluate',
            'utils/test_templates.py::RenderTestCase::test_render_genshi_evaluate_condition',
            'utils/test_templates.py::RenderTestCase::test_render_genshi_sanity',
            'utils/test_templates.py::RenderTestCase::test_render_genshi_variable',
            'utils/test_templates.py::RenderTestCase::test_render_genshi_variable_replace',
            'utils/test_templates.py::RenderTestCase::test_render_mako_evaluate',
            'utils/test_templates.py::RenderTestCase::test_render_mako_evaluate_multi',
            'utils/test_templates.py::RenderTestCase::test_render_mako_sanity',
            'utils/test_templates.py::RenderTestCase::test_render_mako_variable',
            # This produces a bad file descriptor error at the end of the testsuite, even if the tests passes
            'utils/test_thin.py::SSHThinTestCase::test_gen_thin_compression_fallback_py3',
            # contain NO_MOCK which does not exist anymore (throws ImportError)
            'cli/test_support.py',
            'modules/test_saltsupport.py',
            'utils/test_pkg.py',
            # duplicated test file, should be removed in favor of the one in tests/pytests/
            'tests/unit/modules/test_ansiblegate.py',
            # has a broken test, adding it to xfail does not work because it conflicts with tests/unit/utils/test_thin.py
            'pytests/unit/utils/test_thin.py',
            'transport/test_zeromq.py',  # Leaks memory on SLE15SP2
            'transport/test_tcp.py',
            # Errors in 3003.3
            'cloud/test_map.py'
        ],
        'sles11sp4': [
            # SSLError: [Errno 1] _ssl.c:492: error:1409442E:SSL routines:SSL3_READ_BYTES:tlsv1 alert protocol version
            'modules/random_org_test.py',
            'states/test_saltutil.py',
        ],
        'rhel6': [
            # SSLError: [Errno 1] _ssl.c:492: error:1409442E:SSL routines:SSL3_READ_BYTES:tlsv1 alert protocol version
            'modules/random_org_test.py',
            'states/test_saltutil.py',
        ],
        'sles15': [
            'utils/cache_mods/cache_mod.py',
            'test_zypp_plugins.py',
            'modules/test_yumpkg.py',
        ],
    },
    'xfail_list': {
        'common': [
            # fixed in saltstack/develop
            # https://github.com/saltstack/salt/commit/7427e192baeccfee69b4887fe0c630a1afb38730#diff-3b5d15bc59b82fc8d4b15f819babf4faR70
            'test_core.py::CoreGrainsTestCase::test_parse_etc_os_release',
            'test_core.py::CoreGrainsTestCase::test_fqdns_socket_error',
            'test_x509.py::X509TestCase::test_private_func__parse_subject',
            'test_zypper.py::ZypperTestCase::test_list_pkgs_with_attr',
            'test_zfs.py::ZfsUtilsTestCase::test_property_data_zpool',
            'templates/jinja_test.py::TestCustomExtensions::test_serialize_yaml_unicode',
            # not working in docker containers
            'modules/cmdmod_test.py::CMDMODTestCase::test_run',
            'conf_test.py::ConfTest::test_conf_cloud_maps_d_files_are_commented',
            'conf_test.py::ConfTest::test_conf_cloud_profiles_d_files_are_commented',
            'conf_test.py::ConfTest::test_conf_cloud_providers_d_files_are_commented',
            'utils/extend_test.py::ExtendTestCase::test_run',
            'beacons/glxinfo.py::GLXInfoBeaconTestCase::test_no_user',
            'beacons/glxinfo.py::GLXInfoBeaconTestCase::test_non_dict_config',
            # Boto failing tests
            'modules/boto_apigateway_test.py::BotoApiGatewayTestCaseBase::runTest',
            'modules/boto_cloudwatch_event_test.py::BotoCloudWatchEventTestCaseBase::runTest',
            'modules/boto_cognitoidentity_test.py::BotoCognitoIdentityTestCaseBase::runTest',
            'modules/boto_elasticsearch_domain_test.py::BotoElasticsearchDomainTestCaseBase::runTest',
            'states/boto_apigateway_test.py::BotoApiGatewayStateTestCaseBase::runTest',
            'states/boto_cognitoidentity_test.py::BotoCognitoIdentityStateTestCaseBase::runTest',
            'states/boto_elasticsearch_domain_test.py::BotoElasticsearchDomainStateTestCaseBase::runTest',
            'modules/inspect_collector_test.py::InspectorCollectorTestCase::test_file_tree',
            '*CoreGrainsTestCase::test_linux_memdata',
            'EtcdModTestCase',
            'ConfTest::test_conf_master_sample_is_commented',  # this is not passing because we have custom config by default (user "salt")
            'test_cmdmod.py::CMDMODTestCase::test_run',
            'fileserver/test_roots.py::RootsTest::test_symlink_list',
            'modules/test_cmdmod.py::CMDMODTestCase::test_run',  # test too slow
            '*test_reactor.py::TestReactor::test_reactions',
            '*test_reactor.py::TestReactor::test_list_reactors',
            '*test_yumpkg.py::YumTestCase::test_list_pkgs_with_attr',
            '*test_local_cache.py::Local_CacheTest::test_clean_old_jobs',
            '*test_local_cache.py::Local_CacheTest::test_not_clean_new_jobs',
            '*test_jinja.py::TestCustomExtensions::test_http_query',
            '*test_conf.py::ConfTest::test_conf_master_sample_is_commented',
            # After switch to M2Crypto
            'modules/test_x509.py::X509TestCase::test_create_crl',  # No OpenSSL available
            'modules/test_x509.py::X509TestCase::test_revoke_certificate_with_crl',  # No OpenSSL available
            # Fails due to the async batch changes
            'transport/test_ipc.py::IPCMessagePubSubCase::test_multi_client_reading',
            # Needs investigation. Setting them to xfail to have a "new green start" on March 12th
            # https://github.com/SUSE/spacewalk/issues/14263
            'utils/test_jinja.py::TestCustomExtensions::test_json_query',
            'utils/test_data.py::DataTestCase::test_json_query',
            'states/test_syslog_ng.py::SyslogNGTestCase::test_started_state_generate_valid_cli_command',
            'states/test_pip_state.py::PipStateTest::test_install_requirements_parsing',
            'states/test_network.py::NetworkTestCase::test_managed',
            'modules/test_zypperpkg.py::ZypperTestCase::test_upgrade_success',
            'modules/test_zypperpkg.py::ZypperTestCase::test_search_not_found',
            'modules/test_zypperpkg.py::ZypperTestCase::test_add_repo_key_path',
            'modules/test_state.py::StateTestCase::test_show_sls',
            'modules/test_serverdensity_device.py::ServerdensityDeviceTestCase::test_create',
            'modules/test_redismod.py::RedismodTestCase::test_shutdown',
            'modules/test_redismod.py::RedismodTestCase::test_ping',
            'modules/test_netscaler.py::NetscalerTestCase::test_service_enable',
            'modules/test_netscaler.py::NetscalerTestCase::test_service_disable',
            'modules/test_keystone.py::KeystoneTestCase::test_user_get',
            'modules/test_keystone.py::KeystoneTestCase::test_user_create',
            'modules/test_keystone.py::KeystoneTestCase::test_tenant_get',
            'modules/test_keystone.py::KeystoneTestCase::test_tenant_create',
            'modules/test_keystone.py::KeystoneTestCase::test_role_get',
            'modules/test_dpkg_lowpkg.py::DpkgTestCase::test_info',
            'modules/test_cron.py::PsTestCase::test_list_tab',
            'modules/test_aptpkg.py::AptPkgTestCase::test_info_installed_attr_without_status',
            'grains/test_core.py::CoreGrainsTestCase::test_fqdn_return',
            'grains/test_core.py::CoreGrainsTestCase::test_fqdn4_empty',
            'cloud/clouds/test_ec2.py::EC2TestCase::test_termination_protection_exception',
            'cloud/clouds/test_ec2.py::EC2TestCase::test_termination_protection',
            'cli/test_batch_async.py::AsyncBatchTestCase::test_batch_start_on_gather_job_timeout',
            'cli/test_batch_async.py::AsyncBatchTestCase::test_batch_start_on_batch_presence_ping_timeout',
            'cli/test_batch_async.py::AsyncBatchTestCase::test_batch_next',
            'cli/test_batch_async.py::AsyncBatchTestCase::test_batch_close_safe',
            'cli/test_batch_async.py::AsyncBatchTestCase::test_batch__del__',
            'beacons/test_cert_info.py::CertInfoBeaconTestCase::test_cert_information',
            # These also need investigation, setting to xfail for a green start for 3002.2
            'test_ext.py::VendorTornadoTest::test_vendored_tornado_import',
            'test_loader.py::LoaderGlobalsTest::test_auth',
            'test_loader.py::LoaderGlobalsTest::test_outputters',
            'test_loader.py::LoaderGlobalsTest::test_pillars',
            'test_loader.py::LoaderGlobalsTest::test_renderers',
            'test_loader.py::LoaderGlobalsTest::test_returners',
            'test_loader.py::LoaderGlobalsTest::test_runners',
            'test_loader.py::LoaderGlobalsTest::test_serializers',
            'test_loader.py::LoaderGlobalsTest::test_tops',
            'grains/test_core.py::CoreGrainsTestCase::test_core_virtual_invalid',
            'grains/test_core.py::CoreGrainsTestCase::test_core_virtual_unicode',
            'grains/test_core.py::CoreGrainsTestCase::test_get_server_id',
            'modules/test_aptpkg.py::AptPkgTestCase::test_add_repo_key_failed',
            'modules/test_aptpkg.py::AptPkgTestCase::test_list_repos',
            'modules/test_parted_partition.py::PartedTestCase::test__is_fstype',
            'modules/test_parted_partition.py::PartedTestCase::test_mkpartfs_to_mkpart',
            'modules/test_zypperpkg.py::ZypperTestCase::test_list_pkgs_with_attr',
            'utils/test_vmware.py::PrivateGetServiceInstanceTestCase::test_second_attempt_successful_connection',
            'utils/test_vmware.py::PrivateGetServiceInstanceTestCase::test_third_attempt_successful_connection',
            'utils/test_vmware.py::GetServiceInstanceTestCase::test_default_params',
            'utils/test_vmware.py::GetServiceInstanceTestCase::test_no_cached_service_instance_same_host_on_proxy',
            'utils/test_vmware.py::GetServiceInstanceTestCase::test_uncached_service_instance',
            'pytests/unit/modules/test_ansiblegate.py::test_ansible_module_call',
            # Failing on 3003.3
            'beacons/test_telegram_bot_msg.py::TelegramBotMsgBeaconTestCase::test_call_no_updates',
            'beacons/test_telegram_bot_msg.py::TelegramBotMsgBeaconTestCase::test_call_telegram_return_no_updates_for_user',
            'beacons/test_telegram_bot_msg.py::TelegramBotMsgBeaconTestCase::test_call_telegram_returning_updates',
            'modules/test_junos.py::Test_Junos_Module::test_get_table_api_error',
            'modules/test_junos.py::Test_Junos_Module::test_get_table_connect_closed_error',
            'modules/test_junos.py::Test_Junos_Module::test_get_table_inventory',
            'modules/test_junos.py::Test_Junos_Module::test_get_table_no_path_inventory',
            'modules/test_zcbuildout.py::BuildoutTestCase::test_get_bootstrap_url',
            'modules/test_zcbuildout.py::BuildoutTestCase::test_get_buildout_ver',
            'modules/test_zfs.py::ZfsTestCase::test_bookmark_success',
            'modules/test_aptpkg.py::AptPkgTestCase::test_expand_repo_def',
            'modules/test_cmdmod.py::test_run_cwd_in_combination_with_runas',  # Fails on docker container
            'states/test_pkgrepo.py::test_migrated_wrong_method',
        ],
        'sles12sp1': [
            'cloud/clouds/dimensiondata_test.py::DimensionDataTestCase::test_avail_sizes',
        ],
        'sles12sp2': [
            'cloud/clouds/dimensiondata_test.py::DimensionDataTestCase::test_avail_sizes',
        ],
        '2016.11.4': [
            '*network_test.py::NetworkTestCase::test_host_to_ips',
        ],
        'sles15': [
            'utils/test_args.py::ArgsTestCase::test_argspec_report',  # Bad tests, fixed at https://github.com/saltstack/salt/pull/52852
        ],
        'ubuntu1604': [
            'utils/test_args.py::ArgsTestCase::test_argspec_report',  # Bad tests, fixed at https://github.com/saltstack/salt/pull/52852
            # Needs investigation. Setting them to xfail to have a "new green start" on March 19th
            # https://github.com/SUSE/spacewalk/issues/14263
            'modules/test_saltsupport.py::SaltSupportModuleTestCase::test_sync_specified_archive_not_found_failure',
            'modules/test_saltsupport.py::SaltSupportModuleTestCase::test_sync_last_picked_archive_not_found_failure',
            'modules/test_aptpkg.py::AptPkgTestCase::test_add_repo_key_failed',
            'cli/test_support.py::ProfileIntegrityTestCase::test_users_template_profile',
            'cli/test_support.py::ProfileIntegrityTestCase::test_non_template_profiles_parseable',
            'cli/test_support.py::ProfileIntegrityTestCase::test_jobs_trace_template_profile',
            'transport/test_zeromq.py::PubServerChannel::test_issue_36469_tcp',
        ],
        'ubuntu1804': [
            'utils/test_args.py::ArgsTestCase::test_argspec_report',  # Bad tests, fixed at https://github.com/saltstack/salt/pull/52852
            # Needs investigation. Setting them to xfail to have a "new green start" on March 19th
            # https://github.com/SUSE/spacewalk/issues/14263
            'modules/test_saltsupport.py::SaltSupportModuleTestCase::test_sync_specified_archive_not_found_failure',
            'modules/test_saltsupport.py::SaltSupportModuleTestCase::test_sync_last_picked_archive_not_found_failure',
            'modules/test_aptpkg.py::AptPkgTestCase::test_add_repo_key_failed',
            'cli/test_support.py::ProfileIntegrityTestCase::test_users_template_profile',
            'cli/test_support.py::ProfileIntegrityTestCase::test_non_template_profiles_parseable',
            'cli/test_support.py::ProfileIntegrityTestCase::test_jobs_trace_template_profile',
            # These also need investigation, setting to xfail for a green start for 3002.2
            'transport/test_tcp.py::ClearReqTestCases::test_badload',
            'transport/test_tcp.py::ClearReqTestCases::test_basic',
            'transport/test_tcp.py::ClearReqTestCases::test_normalization',
            'transport/test_tcp.py::AESReqTestCases::test_basic',
            'transport/test_tcp.py::AESReqTestCases::test_normalization',
            'transport/test_zeromq.py::ClearReqTestCases::test_badload',
            'transport/test_zeromq.py::ClearReqTestCases::test_basic',
            'transport/test_zeromq.py::ClearReqTestCases::test_normalization',
        ],
        # ip_addrs() needs to be mocked for deterministic tests
        "opensuse151": ['pytests/unit/utils/test_minions.py'],
        "opensuse152": ['pytests/unit/utils/test_minions.py'],
        "opensuse153": ['pytests/unit/utils/test_minions.py'],
    }
}
# Dispatch table mapping the configured tests type (the 'tests_type' ini
# value read by get_list()) to the matching known-issues structure.
KNOWN_ISSUES = {
    'integration': KNOWN_ISSUES_INTEGRATION,
    'unit': KNOWN_ISSUES_UNIT
}
def get_list(config, name):
    """Collect the known-issue patterns applying to the current platform.

    :param config: pytest config object; ``tests_type`` ini value selects the
        integration/unit table, ``config.salt_version`` selects version keys.
    :param name: either ``'ignore_list'`` or ``'xfail_list'``.
    :return: list of fnmatch patterns, each wrapped in ``*...*``.
    :raises ValueError: if *name* is not one of the two supported list names.
    """
    # Explicit check instead of `assert`: assertions are stripped under -O.
    if name not in ('ignore_list', 'xfail_list'):
        raise ValueError("name must be 'ignore_list' or 'xfail_list', got %r" % (name,))

    version = os.environ.get('DISTRO')
    flavor = os.environ.get('FLAVOR')
    tests_type = config.getini('tests_type')

    issues = KNOWN_ISSUES[tests_type][name]
    # Selector keys checked in order: generic first, most specific last.
    selector_keys = (
        'common',
        flavor,
        version,
        '{0}/{1}'.format(version, flavor),
        '{0}/{1}'.format(version, config.salt_version),
        config.salt_version,
    )
    result = []
    for key in selector_keys:
        result.extend(issues.get(key, []))
    return ['*%s*' % it for it in result]
def pytest_ignore_collect(path, config):
    """Tell pytest to skip collecting *path* when it matches any pattern
    in the toaster ignore list."""
    for pattern in config.ignore_list:
        if path.fnmatch(pattern):
            return True
    return False
def pytest_itemcollected(item):
matcher = partial(fnmatch, item.nodeid)
if any(map(matcher, item.config.xfail_list)):
item.add_marker(pytest.mark.xfail, "Xfailed by toaster")
elif any(map(matcher, item.config.ignore_list)):
item.add_marker(pytest.mark.skip, "Ignore by toaster") | en | 0.704577 | :codeauthor: <NAME> (<EMAIL>) tests.conftest ~~~~~~~~~~~~~~ Prepare py.test for our test suite # pylint: disable=wrong-import-order,wrong-import-position,3rd-party-local-module-not-gated # pylint: disable=redefined-outer-name,invalid-name,3rd-party-module-not-gated # pylint: disable=blacklisted-module # pylint: disable=unused-wildcard-import # # Toaster specifics # Change to code checkout directory # Make sure the current directory is the first item in sys.path # Coverage # Flag coverage to track suprocesses by pointing it to the right .coveragerc file # Define the pytest plugins we rely on # Define where not to collect tests from # Patch PyTest logging handlers Subclassing PyTest's LogCaptureHandler in order to add the exc_info_on_loglevel functionality and actually make it a NullHandler, it's only used to print log messages emmited during tests, which we have explicitly disabled in pytest.ini Subclassing PyTest's LiveLoggingStreamHandler in order to add the exc_info_on_loglevel functionality. # Reset logging root handlers # Reset the root logger to its default level(because salt changed it) # ----- PyTest Tempdir Plugin Hooks ---------------------------------------------------------------------------------> Return the temporary directory basename for the salt test suite. # <---- PyTest Tempdir Plugin Hooks ---------------------------------------------------------------------------------- # ----- CLI Options Setup -------------------------------------------------------------------------------------------> register argparse-style options and ini-style config values. 
# Add deprecated CLI flag until we completely switch to PyTest # ----- Test Groups ---------------------------------------------------------------------------------------------> # This will allow running the tests in chunks # <---- Test Groups ---------------------------------------------------------------------------------------------- # Toaster specific # <---- CLI Options Setup -------------------------------------------------------------------------------------------- # ----- Register Markers --------------------------------------------------------------------------------------------> called after command line options have been parsed and all plugins and initial conftest files been loaded. # Expose the markers we use to pytest CLI # Make sure the test suite "knows" this is a pytest test run # "Flag" the slotTest decorator if we're skipping slow tests or not # Toaster specific # <---- Register Markers --------------------------------------------------------------------------------------------- # ----- PyTest Tweaks -----------------------------------------------------------------------------------------------> # Get current limits # Check minimum required limits # Increase limits # pylint: disable=broad-except called after collection has been performed, may filter or re-order the items in-place. :param _pytest.main.Session session: the pytest session object :param _pytest.config.Config config: pytest config object :param List[_pytest.nodes.Item] items: list of item objects # Let PyTest or other plugins handle the initial collection # The test module is within the same package that the fixture is # pylint: disable=broad-except implements the runtest_setup/call/teardown protocol for the given test item, including capturing exceptions and calling reporting hooks. :arg item: test item for which the runtest protocol is performed. :arg nextitem: the scheduled-to-be-next test item (or None if this is the end my friend). 
This argument is passed on to :py:func:`pytest_runtest_teardown`. :return boolean: True if no further hook implementations should be invoked. Stops at first non-None result, see :ref:`firstresult` # Run the test # <---- PyTest Tweaks ------------------------------------------------------------------------------------------------ # ----- Test Setup --------------------------------------------------------------------------------------------------> Fixtures injection based on markers or test skips based on CLI arguments # Unit tests are whitelisted on windows by default, so, we're only # after all other tests # <---- Test Setup --------------------------------------------------------------------------------------------------- # ----- Test Groups Selection ---------------------------------------------------------------------------------------> Calculate group size and start index. Get the items from the passed in group based on group size. # We're not selection tests using groups, don't do any filtering # Replace all items in the list #{} ({} tests)\n".format(group_id, len(items)), yellow=True, # <---- Test Groups Selection ---------------------------------------------------------------------------------------- # ----- Fixtures Overrides ------------------------------------------------------------------------------------------> Return a dictionary with the keyworkd arguments for FactoriesManager # <---- Fixtures Overrides ------------------------------------------------------------------------------------------- # ----- Salt Factories ----------------------------------------------------------------------------------------------> Fixture which returns the salt integration files directory path. Creates the directory if it does not yet exist. Fixture which returns the salt state tree root directory path. Creates the directory if it does not yet exist. Fixture which returns the salt pillar tree root directory path. Creates the directory if it does not yet exist. 
Fixture which returns the salt base environment state tree directory path. Creates the directory if it does not yet exist. Fixture which returns the salt prod environment state tree directory path. Creates the directory if it does not yet exist. Fixture which returns the salt base environment pillar tree directory path. Creates the directory if it does not yet exist. Fixture which returns the salt prod environment pillar tree directory path. Creates the directory if it does not yet exist. # We need to copy the extension modules into the new master root_dir or # it will be prefixed by it # Copy the autosign_file to the new master root_dir # all read, only owner write # Alternate root to test __env__ choices # We need to copy the extension modules into the new master root_dir or # it will be prefixed by it # Copy the autosign_file to the new master root_dir # all read, only owner write # Alternate root to test __env__ choices # Let's copy over the test cloud config files and directories into the running master config directory # Make sure unittest2 uses the pytest generated configuration # Make sure unittest2 classes know their paths # Turn strict modes off so that we can operate in /tmp # Logging # Authentication: # Don't read the user's ~/.rhosts and ~/.shosts files # To enable empty passwords, change to yes (NOT RECOMMENDED) # Change to yes to enable challenge-response passwords (beware issues with # some PAM modules and threads) # Change to no to disable tunnelled clear text passwords # We also need a salt-ssh roster config file \ localhost: host: 127.0.0.1 port: {} user: {} mine_functions: test.arg: ['itworked'] # <---- Salt Factories ----------------------------------------------------------------------------------------------- # ----- From Filenames Test Selection -------------------------------------------------------------------------------> # Don't do anything # In this case, this path is considered to be a file containing a line separated list # of files 
to consider # Let's add the match all rule # Let's now go through the list of files gathered # Tests in the listing don't require additional matching and will be added to the # list of tests to run # No direct macthing # Now let's try a direct match between the passed file and possible test modules # Do we have an entry in tests/filename_map.yml # This is regex # Glob matching # Direct file paths as rules # <---- From Filenames Test Selection -------------------------------------------------------------------------------- # ----- Custom Fixtures ---------------------------------------------------------------------------------------------> # Run tests # Reverse the order, sublings first, parents after # Give up spins up an https webserver. # <---- Custom Fixtures ---------------------------------------------------------------------------------------------- # should no be included # not yet implemented # these are not actual tests # Running following tests causes unsuccessfully close # of forked processes. This will cause "hanging" jenkins jobs. # Evil test # This test causes "py.test" never finishes # 'runners/fileserver.py::FileserverTest::test_clear_file_list_cache', # this test hangs # workaround for comment above # 'wheel/key.py::KeyWheelModuleTest::test_list_all', # ERROR at teardown # workaround for comment above # We are not interested in the NetapiClientTests # This makes a request to github.com # CRON is not installed on toaster images and cron tests are not designed for SUSE. # NEED INVESTIGATION # proxy minion is not starting # After switch to M2Crypto # ModuleNotFoundError: No module named 'Crypto' # Avoid error due: # [Errno 1] _ssl.c:492: error:1409442E:SSL routines:SSL3_READ_BYTES:tlsv1 alert protocol version # disable 2017.7.1 on python 2.6 # Always failing # Sometimes failing in jenkins. # sles12sp1 # sles12sp1 # sles12sp1 rhel7 # sles12sp2 # Bad test implementation # Needs investigation. 
Setting them to xfail to have a "new green start" on March 15th # see https://github.com/SUSE/spacewalk/issues/14284 # Failing in 3003.3 # this test should not be executed on SUSE systems # this test should not be executed on SUSE systems # https://github.com/saltstack/salt/issues/52898 # https://github.com/saltstack/salt/issues/52898 # https://github.com/saltstack/salt/issues/52898 # https://github.com/saltstack/salt/issues/52898 # Make pytest to stuck for long time after tests are executed # BogusIO missing in zypp_plugin # Prevent pytests hang after tests # we have uncommented custom config # we have uncommented custom config # we have uncommented custom config # takes too long to execute # takes too long to execute # NEEDS INVESTIGATION # Not running tests for cheetah, mako and genshi templating # This produces a bad file descriptor error at the end of the testsuite, even if the tests passes # contain NO_MOCK which does not exist anymore (throws ImportError) # duplicated test file, should be removed in favor of the one in tests/pytests/ # has a broken test, adding it to xfail does not work because it conflicts with tests/unit/utils/test_thin.py # Leaks memory on SLE15SP2 # Errors in 3003.3 # SSLError: [Errno 1] _ssl.c:492: error:1409442E:SSL routines:SSL3_READ_BYTES:tlsv1 alert protocol version # SSLError: [Errno 1] _ssl.c:492: error:1409442E:SSL routines:SSL3_READ_BYTES:tlsv1 alert protocol version # fixed in saltstack/develop # https://github.com/saltstack/salt/commit/7427e192baeccfee69b4887fe0c630a1afb38730#diff-3b5d15bc59b82fc8d4b15f819babf4faR70 # not working in docker containers # Boto failing tests # this is not passing because we have custom config by default (user "salt") # test too slow # After switch to M2Crypto # No OpenSSL available # No OpenSSL available # Fails due to the async batch changes # Needs investigation. 
Setting them to xfail to have a "new green start" on March 12th # https://github.com/SUSE/spacewalk/issues/14263 # These also need investigation, setting to xfail for a green start for 3002.2 # Failing on 3003.3 # Fails on docker container # Bad tests, fixed at https://github.com/saltstack/salt/pull/52852 # Bad tests, fixed at https://github.com/saltstack/salt/pull/52852 # Needs investigation. Setting them to xfail to have a "new green start" on March 19th # https://github.com/SUSE/spacewalk/issues/14263 # Bad tests, fixed at https://github.com/saltstack/salt/pull/52852 # Needs investigation. Setting them to xfail to have a "new green start" on March 19th # https://github.com/SUSE/spacewalk/issues/14263 # These also need investigation, setting to xfail for a green start for 3002.2 # ip_addrs() needs to be mocked for deterministic tests | 1.790505 | 2 |
examples/test_reconnecting_client.py | gf0842wf/gu | 3 | 6612329 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""test reconnecting client"""
__author__ = 'wangfei'
__date__ = '2015/03/06'
from gu.protocol import Protocol
import gevent
import logging
from gevent.socket import create_connection
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)-15s %(levelname)s:%(module)s] %(message)s')
class EchoClientProtocol(Protocol):
def connection_made(self):
logger.info('connection made')
self.send_data('ooxx')
def data_received(self, data):
logger.debug('data received: %s', data)
self.send_data(data)
gevent.sleep(2)
def connection_lost(self, reason):
logger.info('connection lost')
super(EchoClientProtocol, self).connection_lost(reason)
reconnect()
def reconnect():
while True:
logger.info('try reconnect..')
try:
s = create_connection(('127.0.0.1', 6000))
except:
gevent.sleep(5)
continue
logger.info('reconnected.')
gevent.spawn(EchoClientProtocol, s, None)
break
s = create_connection(('127.0.0.1', 6000))
gevent.spawn(EchoClientProtocol, s, None)
gevent.wait()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""test reconnecting client"""
__author__ = 'wangfei'
__date__ = '2015/03/06'
from gu.protocol import Protocol
import gevent
import logging
from gevent.socket import create_connection
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)-15s %(levelname)s:%(module)s] %(message)s')
class EchoClientProtocol(Protocol):
def connection_made(self):
logger.info('connection made')
self.send_data('ooxx')
def data_received(self, data):
logger.debug('data received: %s', data)
self.send_data(data)
gevent.sleep(2)
def connection_lost(self, reason):
logger.info('connection lost')
super(EchoClientProtocol, self).connection_lost(reason)
reconnect()
def reconnect():
while True:
logger.info('try reconnect..')
try:
s = create_connection(('127.0.0.1', 6000))
except:
gevent.sleep(5)
continue
logger.info('reconnected.')
gevent.spawn(EchoClientProtocol, s, None)
break
s = create_connection(('127.0.0.1', 6000))
gevent.spawn(EchoClientProtocol, s, None)
gevent.wait() | en | 0.529565 | #!/usr/bin/env python # -*- coding: utf-8 -*- test reconnecting client | 2.733182 | 3 |
examples/plot_traj_averages.py | hheenen/SimSoliqTools | 0 | 6612330 | <gh_stars>0
#!/usr/bin/env python
import numpy as np
from simsoliq.io import init_mdtraj
from simsoliq.mdtraj_average import average_energies, sort_energies, average_densities
from simsoliq.plotting.ensemble_plots import plot_running_av_ensemble
from simsoliq.plotting.standard_plots import plot_density
if __name__ == "__main__":
# read trajectories (use restart as individual traj)
trj1 = init_mdtraj("../tests/data/Pt111_24H2O_x/vasprun.xml", fmat='vasp')
trj2 = init_mdtraj("../tests/data/Pt111_24H2O_x/restart/vasprun.xml", fmat='vasp')
###################################################################
# mean and standard deviation of trajectories sorted by composition
# (only one composition included here)
e_val = average_energies([trj1, trj2], tstart=0)
for ek in e_val:
print(ek, e_val[ek])
###################################################################
###################################################################
# return data for energies and plot all running averages
edat = sort_energies([trj1, trj2])
# artificially elongate data for more interesting plot
for ek in edat:
if ek not in ['timestep','timeunit']:
for c in edat[ek]:
for i in range(len(edat[ek][c])):
edat[ek][c][i] = np.array(1000*edat[ek][c][i].tolist())
# add artificial entry for different systems
for ek in edat:
if ek not in ['timestep','timeunit']:
edat[ek].update({"Au36_24H2O":[edat[ek]['Pt36_24H2O'][i] + 150]})
edat[ek].update({"Pt36_24H2O_CO":[edat[ek]['Pt36_24H2O'][i] -1.0]})
# plot running averages of total energy- starting averaging after tstart
timedat = {k:edat[k] for k in ['timestep','timeunit']}
plot_running_av_ensemble("ensemble_running_averages_etot", edat['etot'], \
surftag = {}, tstart=0.5, tunit='ps', **timedat)
###################################################################
| #!/usr/bin/env python
import numpy as np
from simsoliq.io import init_mdtraj
from simsoliq.mdtraj_average import average_energies, sort_energies, average_densities
from simsoliq.plotting.ensemble_plots import plot_running_av_ensemble
from simsoliq.plotting.standard_plots import plot_density
if __name__ == "__main__":
# read trajectories (use restart as individual traj)
trj1 = init_mdtraj("../tests/data/Pt111_24H2O_x/vasprun.xml", fmat='vasp')
trj2 = init_mdtraj("../tests/data/Pt111_24H2O_x/restart/vasprun.xml", fmat='vasp')
###################################################################
# mean and standard deviation of trajectories sorted by composition
# (only one composition included here)
e_val = average_energies([trj1, trj2], tstart=0)
for ek in e_val:
print(ek, e_val[ek])
###################################################################
###################################################################
# return data for energies and plot all running averages
edat = sort_energies([trj1, trj2])
# artificially elongate data for more interesting plot
for ek in edat:
if ek not in ['timestep','timeunit']:
for c in edat[ek]:
for i in range(len(edat[ek][c])):
edat[ek][c][i] = np.array(1000*edat[ek][c][i].tolist())
# add artificial entry for different systems
for ek in edat:
if ek not in ['timestep','timeunit']:
edat[ek].update({"Au36_24H2O":[edat[ek]['Pt36_24H2O'][i] + 150]})
edat[ek].update({"Pt36_24H2O_CO":[edat[ek]['Pt36_24H2O'][i] -1.0]})
# plot running averages of total energy- starting averaging after tstart
timedat = {k:edat[k] for k in ['timestep','timeunit']}
plot_running_av_ensemble("ensemble_running_averages_etot", edat['etot'], \
surftag = {}, tstart=0.5, tunit='ps', **timedat)
################################################################### | de | 0.35781 | #!/usr/bin/env python # read trajectories (use restart as individual traj) ################################################################### # mean and standard deviation of trajectories sorted by composition # (only one composition included here) ################################################################### ################################################################### # return data for energies and plot all running averages # artificially elongate data for more interesting plot # add artificial entry for different systems # plot running averages of total energy- starting averaging after tstart ################################################################### | 2.118912 | 2 |
grouper/background/background_processor.py | aneeq009/merou | 58 | 6612331 | import logging
import os
import sys
from collections import defaultdict
from contextlib import closing
from datetime import datetime
from time import sleep
from typing import TYPE_CHECKING
from sqlalchemy import and_
from grouper.audit import get_auditors_group
from grouper.constants import PERMISSION_AUDITOR
from grouper.email_util import (
notify_edge_expiration,
notify_nonauditor_promoted,
process_async_emails,
)
from grouper.entities.group_edge import APPROVER_ROLE_INDICES
from grouper.graph import Graph
from grouper.models.base.session import Session
from grouper.models.group import Group
from grouper.models.group_edge import GroupEdge
from grouper.models.user import User
from grouper.perf_profile import prune_old_traces
if TYPE_CHECKING:
from grouper.background.settings import BackgroundSettings
from grouper.plugin.proxy import PluginProxy
from typing import Dict, NoReturn, Set
class BackgroundProcessor:
"""Background process for running periodic tasks.
Currently, this sends asynchronous mail messages and handles edge expiration and notification.
"""
def __init__(self, settings, plugins):
# type: (BackgroundSettings, PluginProxy) -> None
"""Initialize new BackgroundProcessor"""
self.settings = settings
self.plugins = plugins
self.logger = logging.getLogger(__name__)
def crash(self):
# type: () -> NoReturn
os._exit(1)
def expire_edges(self, session):
# type: (Session) -> None
"""Mark expired edges as inactive and log to the audit log.
Edges are immediately excluded from the permission graph once they've
expired, but we also want to note the expiration in the audit log and send
an email notification. This function finds all expired edges, logs the
expiration to the audit log, and sends a notification message. It's meant
to be run from the background processing thread.
"""
now = datetime.utcnow()
# Pull the expired edges.
edges = (
session.query(GroupEdge)
.filter(
GroupEdge.group_id == Group.id,
Group.enabled == True,
GroupEdge.active == True,
and_(GroupEdge.expiration <= now, GroupEdge.expiration != None),
)
.all()
)
# Expire each one.
for edge in edges:
notify_edge_expiration(self.settings, session, edge)
edge.active = False
session.commit()
def promote_nonauditors(self, session):
# type: (Session) -> None
"""Checks all enabled audited groups and ensures that all approvers for that group have
the PERMISSION_AUDITOR permission. All non-auditor approvers of audited groups will be
promoted to be auditors, i.e., added to the auditors group.
Args:
session (Session): database session
"""
graph = Graph()
# Hack to ensure the graph is loaded before we access it
graph.update_from_db(session)
# map from user object to names of audited groups in which
# user is a nonauditor approver
nonauditor_approver_to_groups = defaultdict(set) # type: Dict[User, Set[str]]
user_is_auditor = {} # type: Dict[str, bool]
for group_tuple in graph.get_groups(audited=True, directly_audited=False):
group_md = graph.get_group_details(group_tuple.name, expose_aliases=False)
for username, user_md in group_md["users"].items():
if username not in user_is_auditor:
user_perms = graph.get_user_details(username)["permissions"]
user_is_auditor[username] = any(
[p["permission"] == PERMISSION_AUDITOR for p in user_perms]
)
if user_is_auditor[username]:
# user is already auditor so can skip
continue
if user_md["role"] in APPROVER_ROLE_INDICES:
# non-auditor approver. BAD!
nonauditor_approver_to_groups[username].add(group_tuple.name)
if nonauditor_approver_to_groups:
auditors_group = get_auditors_group(self.settings, session)
for username, group_names in nonauditor_approver_to_groups.items():
reason = "auto-added due to having approver role(s) in group(s): {}".format(
", ".join(group_names)
)
user = User.get(session, name=username)
assert user
auditors_group.add_member(user, user, reason, status="actioned")
notify_nonauditor_promoted(
self.settings, session, user, auditors_group, group_names
)
session.commit()
def run(self):
# type: () -> None
initial_url = self.settings.database
while True:
try:
if self.settings.database != initial_url:
self.crash()
with closing(Session()) as session:
self.logger.info("Expiring edges....")
self.expire_edges(session)
self.logger.info("Promoting nonauditor approvers in audited groups...")
self.promote_nonauditors(session)
self.logger.info("Sending emails...")
process_async_emails(self.settings, session, datetime.utcnow())
self.logger.info("Pruning old traces....")
prune_old_traces(session)
session.commit()
self.plugins.log_background_run(success=True)
except Exception:
self.plugins.log_background_run(success=False)
self.plugins.log_exception(None, None, *sys.exc_info())
self.logger.exception("Unexpected exception occurred in background thread")
self.crash()
self.logger.debug("Sleeping for {} seconds...".format(self.settings.sleep_interval))
sleep(self.settings.sleep_interval)
| import logging
import os
import sys
from collections import defaultdict
from contextlib import closing
from datetime import datetime
from time import sleep
from typing import TYPE_CHECKING
from sqlalchemy import and_
from grouper.audit import get_auditors_group
from grouper.constants import PERMISSION_AUDITOR
from grouper.email_util import (
notify_edge_expiration,
notify_nonauditor_promoted,
process_async_emails,
)
from grouper.entities.group_edge import APPROVER_ROLE_INDICES
from grouper.graph import Graph
from grouper.models.base.session import Session
from grouper.models.group import Group
from grouper.models.group_edge import GroupEdge
from grouper.models.user import User
from grouper.perf_profile import prune_old_traces
if TYPE_CHECKING:
from grouper.background.settings import BackgroundSettings
from grouper.plugin.proxy import PluginProxy
from typing import Dict, NoReturn, Set
class BackgroundProcessor:
"""Background process for running periodic tasks.
Currently, this sends asynchronous mail messages and handles edge expiration and notification.
"""
def __init__(self, settings, plugins):
# type: (BackgroundSettings, PluginProxy) -> None
"""Initialize new BackgroundProcessor"""
self.settings = settings
self.plugins = plugins
self.logger = logging.getLogger(__name__)
def crash(self):
# type: () -> NoReturn
os._exit(1)
def expire_edges(self, session):
# type: (Session) -> None
"""Mark expired edges as inactive and log to the audit log.
Edges are immediately excluded from the permission graph once they've
expired, but we also want to note the expiration in the audit log and send
an email notification. This function finds all expired edges, logs the
expiration to the audit log, and sends a notification message. It's meant
to be run from the background processing thread.
"""
now = datetime.utcnow()
# Pull the expired edges.
edges = (
session.query(GroupEdge)
.filter(
GroupEdge.group_id == Group.id,
Group.enabled == True,
GroupEdge.active == True,
and_(GroupEdge.expiration <= now, GroupEdge.expiration != None),
)
.all()
)
# Expire each one.
for edge in edges:
notify_edge_expiration(self.settings, session, edge)
edge.active = False
session.commit()
def promote_nonauditors(self, session):
# type: (Session) -> None
"""Checks all enabled audited groups and ensures that all approvers for that group have
the PERMISSION_AUDITOR permission. All non-auditor approvers of audited groups will be
promoted to be auditors, i.e., added to the auditors group.
Args:
session (Session): database session
"""
graph = Graph()
# Hack to ensure the graph is loaded before we access it
graph.update_from_db(session)
# map from user object to names of audited groups in which
# user is a nonauditor approver
nonauditor_approver_to_groups = defaultdict(set) # type: Dict[User, Set[str]]
user_is_auditor = {} # type: Dict[str, bool]
for group_tuple in graph.get_groups(audited=True, directly_audited=False):
group_md = graph.get_group_details(group_tuple.name, expose_aliases=False)
for username, user_md in group_md["users"].items():
if username not in user_is_auditor:
user_perms = graph.get_user_details(username)["permissions"]
user_is_auditor[username] = any(
[p["permission"] == PERMISSION_AUDITOR for p in user_perms]
)
if user_is_auditor[username]:
# user is already auditor so can skip
continue
if user_md["role"] in APPROVER_ROLE_INDICES:
# non-auditor approver. BAD!
nonauditor_approver_to_groups[username].add(group_tuple.name)
if nonauditor_approver_to_groups:
auditors_group = get_auditors_group(self.settings, session)
for username, group_names in nonauditor_approver_to_groups.items():
reason = "auto-added due to having approver role(s) in group(s): {}".format(
", ".join(group_names)
)
user = User.get(session, name=username)
assert user
auditors_group.add_member(user, user, reason, status="actioned")
notify_nonauditor_promoted(
self.settings, session, user, auditors_group, group_names
)
session.commit()
def run(self):
# type: () -> None
initial_url = self.settings.database
while True:
try:
if self.settings.database != initial_url:
self.crash()
with closing(Session()) as session:
self.logger.info("Expiring edges....")
self.expire_edges(session)
self.logger.info("Promoting nonauditor approvers in audited groups...")
self.promote_nonauditors(session)
self.logger.info("Sending emails...")
process_async_emails(self.settings, session, datetime.utcnow())
self.logger.info("Pruning old traces....")
prune_old_traces(session)
session.commit()
self.plugins.log_background_run(success=True)
except Exception:
self.plugins.log_background_run(success=False)
self.plugins.log_exception(None, None, *sys.exc_info())
self.logger.exception("Unexpected exception occurred in background thread")
self.crash()
self.logger.debug("Sleeping for {} seconds...".format(self.settings.sleep_interval))
sleep(self.settings.sleep_interval)
| en | 0.859716 | Background process for running periodic tasks. Currently, this sends asynchronous mail messages and handles edge expiration and notification. # type: (BackgroundSettings, PluginProxy) -> None Initialize new BackgroundProcessor # type: () -> NoReturn # type: (Session) -> None Mark expired edges as inactive and log to the audit log. Edges are immediately excluded from the permission graph once they've expired, but we also want to note the expiration in the audit log and send an email notification. This function finds all expired edges, logs the expiration to the audit log, and sends a notification message. It's meant to be run from the background processing thread. # Pull the expired edges. # Expire each one. # type: (Session) -> None Checks all enabled audited groups and ensures that all approvers for that group have the PERMISSION_AUDITOR permission. All non-auditor approvers of audited groups will be promoted to be auditors, i.e., added to the auditors group. Args: session (Session): database session # Hack to ensure the graph is loaded before we access it # map from user object to names of audited groups in which # user is a nonauditor approver # type: Dict[User, Set[str]] # type: Dict[str, bool] # user is already auditor so can skip # non-auditor approver. BAD! # type: () -> None | 2.057189 | 2 |
src/utils/client.py | jroberts07/fpl-stats-api | 0 | 6612332 | import aiohttp
import ssl
def get_session():
"""Creates an aiohhtp session.
Returns:
obj: Aiohttp client session.
"""
ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
conn = aiohttp.TCPConnector(ssl=ssl_ctx)
return aiohttp.ClientSession(connector=conn)
| import aiohttp
import ssl
def get_session():
"""Creates an aiohhtp session.
Returns:
obj: Aiohttp client session.
"""
ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.CERT_NONE
conn = aiohttp.TCPConnector(ssl=ssl_ctx)
return aiohttp.ClientSession(connector=conn)
| en | 0.242976 | Creates an aiohhtp session. Returns: obj: Aiohttp client session. | 2.77975 | 3 |
Utils/_script_template_docker.py | ddi-danielsantander/content | 7 | 6612333 | %%COMMONSCRIPT%%
%%USERSCRIPT%%
%%SCRIPT%% | %%COMMONSCRIPT%%
%%USERSCRIPT%%
%%SCRIPT%% | none | 1 | 1.023059 | 1 | |
code/data_batcher.py | trthanhquang/QA_SQuAD | 3 | 6612334 | # Copyright 2018 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains code to read tokenized data from file,
truncate, pad and process it into batches ready for training"""
import random
import time
import re
import nltk
import numpy as np
from six.moves import xrange
from vocab import PAD_ID, UNK_ID
from bilm import Batcher
from copy import deepcopy
class Batch(object):
"""A class to hold the information needed for a training batch"""
def __init__(self, context_ids, context_mask, context_tokens, qn_ids, qn_mask, qn_tokens, ans_span, ans_tokens, context_char, qn_char, context_pos_ids, qn_pos_ids, context_ne_ids, qn_ne_ids, context_em, uuids=None):
"""
Inputs:
{context/qn}_ids: Numpy arrays.
Shape (batch_size, {context_len/question_len}). Contains padding.
{context/qn}_mask: Numpy arrays, same shape as _ids.
Contains 1s where there is real data, 0s where there is padding.
{context/qn/ans}_tokens: Lists length batch_size, containing lists (unpadded) of tokens (strings)
ans_span: numpy array, shape (batch_size, 2)
uuid: a list (length batch_size) of strings.
Not needed for training. Used by official_eval mode.
"""
self.context_ids = context_ids
self.context_mask = context_mask
self.context_tokens = context_tokens
self.context_char = context_char
self.qn_ids = qn_ids
self.qn_mask = qn_mask
self.qn_tokens = qn_tokens
self.qn_char = qn_char
self.context_pos_ids = context_pos_ids
self.qn_pos_ids = qn_pos_ids
self.context_ne_ids = context_ne_ids
self.qn_ne_ids = qn_ne_ids
self.context_em = context_em
self.ans_span = ans_span
self.ans_tokens = ans_tokens
self.uuids = uuids
self.batch_size = len(self.context_tokens)
def split_by_whitespace(sentence):
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(re.split(" ", space_separated_fragment))
return [w for w in words if w]
def intstr_to_intlist(string):
"""Given a string e.g. '311 9 1334 635 6192 56 639', returns as a list of integers"""
return [int(s) for s in string.split()]
def sentence_to_token_ids(sentence, word2id):
"""Turns an already-tokenized sentence string into word indices
e.g. "i do n't know" -> [9, 32, 16, 96]
Note any token that isn't in the word2id mapping gets mapped to the id for UNK
"""
tokens = split_by_whitespace(sentence) # list of strings
ids = [word2id.get(w, UNK_ID) for w in tokens]
return tokens, ids
# NOTE: CHANGE
def token_to_pos_ne_id(tokens, pos_tag_id_map, ne_tag_id_map):
"""Turns an already-tokenized sentence string into word indices
e.g. "i do n't know" -> [9, 32, 16, 96]
Note any token that isn't in the word2id mapping gets mapped to the id for UNK
"""
pos_tags = nltk.tree2conlltags(nltk.ne_chunk(nltk.pos_tag(tokens), binary=False))
pos_id = []
ne_id = []
for tag_p in pos_tags:
pos_tag = tag_p[1]
ne_tag = tag_p[2]
if pos_tag in pos_tag_id_map:
pos_id.append(pos_tag_id_map[pos_tag])
else:
print ('pos tag mis match')
pos_id.append(0)
if ne_tag in ne_tag_id_map:
ne_id.append(ne_tag_id_map[ne_tag])
else:
print ('ne tag mis match')
ne_id.append(0)
return pos_id, ne_id
def get_em(context_token, qn_token):
result = [0] * len(context_token)
qn_token_set = set(qn_token)
for i in range(len(context_token)):
if context_token[i] in qn_token:
result[i] = 1
return result
def get_pos_ne_id(line):
str_result = line.split()
return list(map(int, str_result))
def padded(token_batch, batch_pad=0):
"""
Inputs:
token_batch: List (length batch size) of lists of ints.
batch_pad: Int. Length to pad to. If 0, pad to maximum length sequence in token_batch.
Returns:
List (length batch_size) of padded of lists of ints.
All are same length - batch_pad if batch_pad!=0, otherwise the maximum length in token_batch
"""
maxlen = max([len(x) for x in token_batch]) if batch_pad == 0 else batch_pad
return [token_list + [PAD_ID] * (maxlen - len(token_list)) for token_list in token_batch]
def refill_batches(batches, word2id, context_file, qn_file, ans_file, context_pos_file, qn_pos_file, context_ne_file, qn_ne_file, batch_size, context_len, question_len, discard_long, batcher):
"""
Adds more batches into the "batches" list.
Inputs:
batches: list to add batches to
word2id: dictionary mapping word (string) to word id (int)
context_file, qn_file, ans_file: paths to {train/dev}.{context/question/answer} data files
batch_size: int. how big to make the batches
context_len, question_len: max length of context and question respectively
discard_long: If True, discard any examples that are longer than context_len or question_len.
If False, truncate those exmaples instead.
"""
print ("Refilling batches...")
tic = time.time()
examples = [] # list of (qn_ids, context_ids, ans_span, ans_tokens) triples
context_line, qn_line, ans_line = context_file.readline(), qn_file.readline(), ans_file.readline() # read the next line from each
context_pos_line, qn_pos_line, context_ne_line, qn_ne_line = context_pos_file.readline(), qn_pos_file.readline(), context_ne_file.readline(), qn_ne_file.readline()
while context_line and qn_line and ans_line: # while you haven't reached the end
# Convert tokens to word ids
context_tokens, context_ids = sentence_to_token_ids(context_line, word2id)
qn_tokens, qn_ids = sentence_to_token_ids(qn_line, word2id)
ans_span = intstr_to_intlist(ans_line)
context_tokens_char = deepcopy(context_tokens)
qn_tokens_char = deepcopy(qn_tokens)
# read the next line from each file
context_line, qn_line, ans_line = context_file.readline(), qn_file.readline(), ans_file.readline()
# get ans_tokens from ans_span
assert len(ans_span) == 2
if ans_span[1] < ans_span[0]:
print ("Found an ill-formed gold span: start=%i end=%i" % (ans_span[0], ans_span[1]))
continue
ans_tokens = context_tokens[ans_span[0] : ans_span[1]+1] # list of strings
# discard or truncate too-long questions
if len(qn_ids) > question_len:
if discard_long:
continue
else: # truncate
qn_ids = qn_ids[:question_len]
qn_tokens_char = qn_tokens_char[:question_len]
# discard or truncate too-long contexts
if len(context_ids) > context_len:
if discard_long:
continue
else: # truncate
context_ids = context_ids[:context_len]
context_tokens_char = context_tokens_char[:context_len]
# add to examples
# NOTE: Change
context_pos_id = get_pos_ne_id(context_pos_line)
context_ne_id = get_pos_ne_id(context_ne_line)
qn_pos_id = get_pos_ne_id(qn_pos_line)
qn_ne_id = get_pos_ne_id(qn_ne_line)
context_em = get_em(context_tokens_char, qn_tokens_char)
examples.append((context_ids, context_tokens, qn_ids, qn_tokens, ans_span, ans_tokens, context_tokens_char, qn_tokens_char, context_pos_id, qn_pos_id, context_ne_id, qn_ne_id, context_em))
# stop refilling if you have 160 batches
if len(examples) == batch_size * 160:
break
# Once you've either got 160 batches or you've reached end of file:
# Sort by question length
# Note: if you sort by context length, then you'll have batches which contain the same context many times (because each context appears several times, with different questions)
examples = sorted(examples, key=lambda e: len(e[2]))
# Make into batches and append to the list batches
for batch_start in range(0, len(examples), batch_size):
# Note: each of these is a list length batch_size of lists of ints (except on last iter when it might be less than batch_size)
context_ids_batch, context_tokens_batch, qn_ids_batch, qn_tokens_batch, ans_span_batch, ans_tokens_batch, context_tokens_char_batch, qn_tokens_char_batch, context_pos_id_batch, qn_pos_id_batch, context_ne_id_batch, qn_ne_id_batch, context_em_batch= list(zip(*examples[batch_start:batch_start+batch_size]))
# NOTE: Change
context_char_batch = batcher.batch_sentences(context_tokens_char_batch, context_len) # already padded
qn_char_batch = batcher.batch_sentences(qn_tokens_char_batch, question_len) # already padded
batches.append((context_ids_batch, context_tokens_batch, qn_ids_batch, qn_tokens_batch, ans_span_batch, ans_tokens_batch, context_char_batch, qn_char_batch,context_pos_id_batch, qn_pos_id_batch, context_ne_id_batch, qn_ne_id_batch, context_em_batch))
# shuffle the batches
random.shuffle(batches)
toc = time.time()
print ("Refilling batches took %.2f seconds" % (toc-tic))
return
def get_batch_generator(word2id, context_path, qn_path, ans_path, context_pos_path, qn_pos_path, context_ne_path, qn_ne_path, batch_size, context_len, question_len, discard_long, batcher):
"""
This function returns a generator object that yields batches.
The last batch in the dataset will be a partial batch.
Read this to understand generators and the yield keyword in Python: https://stackoverflow.com/questions/231767/what-does-the-yield-keyword-do
Inputs:
word2id: dictionary mapping word (string) to word id (int)
context_file, qn_file, ans_file: paths to {train/dev}.{context/question/answer} data files
batch_size: int. how big to make the batches
context_len, question_len: max length of context and question respectively
discard_long: If True, discard any examples that are longer than context_len or question_len.
If False, truncate those exmaples instead.
"""
context_file, qn_file, ans_file = open(context_path), open(qn_path), open(ans_path)
context_pos_file, qn_pos_file, context_ne_file, qn_ne_file = open(context_pos_path), open(qn_pos_path), open(context_ne_path), open(qn_ne_path)
batches = []
while True:
if len(batches) == 0: # add more batches
refill_batches(batches, word2id, context_file, qn_file, ans_file, context_pos_file, qn_pos_file, context_ne_file, qn_ne_file, batch_size, context_len, question_len, discard_long, batcher)
if len(batches) == 0:
break
# NOTE: CHANGE
# Get next batch. These are all lists length batch_size
(context_ids, context_tokens, qn_ids, qn_tokens, ans_span, ans_tokens, context_char, qn_char, context_pos_ids, qn_pos_ids, context_ne_ids, qn_ne_ids, context_em) = batches.pop(0)
# Pad context_ids and qn_ids
qn_ids = padded(qn_ids, question_len) # pad questions to length question_len
context_ids = padded(context_ids, context_len) # pad contexts to length context_len
context_pos_ids = padded(context_pos_ids, context_len)
context_ne_ids = padded(context_ne_ids, context_len)
qn_pos_ids = padded(qn_pos_ids, question_len)
qn_ne_ids = padded(qn_ne_ids, question_len)
context_em = padded(qn_ne_ids, context_len)
# Make qn_ids into a np array and create qn_mask
qn_ids = np.array(qn_ids) # shape (question_len, batch_size)
qn_mask = (qn_ids != PAD_ID).astype(np.int32) # shape (question_len, batch_size)
qn_pos_ids = np.array(qn_pos_ids)
qn_ne_ids = np.array(qn_ne_ids)
# Make context_ids into a np array and create context_mask
context_ids = np.array(context_ids) # shape (context_len, batch_size)
context_mask = (context_ids != PAD_ID).astype(np.int32) # shape (context_len, batch_size)
context_pos_ids = np.array(context_pos_ids)
context_ne_ids = np.array(context_ne_ids)
context_em = np.array(context_em)
# Make ans_span into a np array
ans_span = np.array(ans_span) # shape (batch_size, 2)
# Make into a Batch object
batch = Batch(context_ids, context_mask, context_tokens, qn_ids, qn_mask, qn_tokens, ans_span, ans_tokens, context_char, qn_char, context_pos_ids, qn_pos_ids, context_ne_ids, qn_ne_ids, context_em)
yield batch
return
| # Copyright 2018 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains code to read tokenized data from file,
truncate, pad and process it into batches ready for training"""
import random
import time
import re
import nltk
import numpy as np
from six.moves import xrange
from vocab import PAD_ID, UNK_ID
from bilm import Batcher
from copy import deepcopy
class Batch(object):
"""A class to hold the information needed for a training batch"""
def __init__(self, context_ids, context_mask, context_tokens, qn_ids, qn_mask, qn_tokens, ans_span, ans_tokens, context_char, qn_char, context_pos_ids, qn_pos_ids, context_ne_ids, qn_ne_ids, context_em, uuids=None):
"""
Inputs:
{context/qn}_ids: Numpy arrays.
Shape (batch_size, {context_len/question_len}). Contains padding.
{context/qn}_mask: Numpy arrays, same shape as _ids.
Contains 1s where there is real data, 0s where there is padding.
{context/qn/ans}_tokens: Lists length batch_size, containing lists (unpadded) of tokens (strings)
ans_span: numpy array, shape (batch_size, 2)
uuid: a list (length batch_size) of strings.
Not needed for training. Used by official_eval mode.
"""
self.context_ids = context_ids
self.context_mask = context_mask
self.context_tokens = context_tokens
self.context_char = context_char
self.qn_ids = qn_ids
self.qn_mask = qn_mask
self.qn_tokens = qn_tokens
self.qn_char = qn_char
self.context_pos_ids = context_pos_ids
self.qn_pos_ids = qn_pos_ids
self.context_ne_ids = context_ne_ids
self.qn_ne_ids = qn_ne_ids
self.context_em = context_em
self.ans_span = ans_span
self.ans_tokens = ans_tokens
self.uuids = uuids
self.batch_size = len(self.context_tokens)
def split_by_whitespace(sentence):
words = []
for space_separated_fragment in sentence.strip().split():
words.extend(re.split(" ", space_separated_fragment))
return [w for w in words if w]
def intstr_to_intlist(string):
"""Given a string e.g. '311 9 1334 635 6192 56 639', returns as a list of integers"""
return [int(s) for s in string.split()]
def sentence_to_token_ids(sentence, word2id):
"""Turns an already-tokenized sentence string into word indices
e.g. "i do n't know" -> [9, 32, 16, 96]
Note any token that isn't in the word2id mapping gets mapped to the id for UNK
"""
tokens = split_by_whitespace(sentence) # list of strings
ids = [word2id.get(w, UNK_ID) for w in tokens]
return tokens, ids
# NOTE: CHANGE
def token_to_pos_ne_id(tokens, pos_tag_id_map, ne_tag_id_map):
"""Turns an already-tokenized sentence string into word indices
e.g. "i do n't know" -> [9, 32, 16, 96]
Note any token that isn't in the word2id mapping gets mapped to the id for UNK
"""
pos_tags = nltk.tree2conlltags(nltk.ne_chunk(nltk.pos_tag(tokens), binary=False))
pos_id = []
ne_id = []
for tag_p in pos_tags:
pos_tag = tag_p[1]
ne_tag = tag_p[2]
if pos_tag in pos_tag_id_map:
pos_id.append(pos_tag_id_map[pos_tag])
else:
print ('pos tag mis match')
pos_id.append(0)
if ne_tag in ne_tag_id_map:
ne_id.append(ne_tag_id_map[ne_tag])
else:
print ('ne tag mis match')
ne_id.append(0)
return pos_id, ne_id
def get_em(context_token, qn_token):
result = [0] * len(context_token)
qn_token_set = set(qn_token)
for i in range(len(context_token)):
if context_token[i] in qn_token:
result[i] = 1
return result
def get_pos_ne_id(line):
str_result = line.split()
return list(map(int, str_result))
def padded(token_batch, batch_pad=0):
"""
Inputs:
token_batch: List (length batch size) of lists of ints.
batch_pad: Int. Length to pad to. If 0, pad to maximum length sequence in token_batch.
Returns:
List (length batch_size) of padded of lists of ints.
All are same length - batch_pad if batch_pad!=0, otherwise the maximum length in token_batch
"""
maxlen = max([len(x) for x in token_batch]) if batch_pad == 0 else batch_pad
return [token_list + [PAD_ID] * (maxlen - len(token_list)) for token_list in token_batch]
def refill_batches(batches, word2id, context_file, qn_file, ans_file, context_pos_file, qn_pos_file, context_ne_file, qn_ne_file, batch_size, context_len, question_len, discard_long, batcher):
"""
Adds more batches into the "batches" list.
Inputs:
batches: list to add batches to
word2id: dictionary mapping word (string) to word id (int)
context_file, qn_file, ans_file: paths to {train/dev}.{context/question/answer} data files
batch_size: int. how big to make the batches
context_len, question_len: max length of context and question respectively
discard_long: If True, discard any examples that are longer than context_len or question_len.
If False, truncate those exmaples instead.
"""
print ("Refilling batches...")
tic = time.time()
examples = [] # list of (qn_ids, context_ids, ans_span, ans_tokens) triples
context_line, qn_line, ans_line = context_file.readline(), qn_file.readline(), ans_file.readline() # read the next line from each
context_pos_line, qn_pos_line, context_ne_line, qn_ne_line = context_pos_file.readline(), qn_pos_file.readline(), context_ne_file.readline(), qn_ne_file.readline()
while context_line and qn_line and ans_line: # while you haven't reached the end
# Convert tokens to word ids
context_tokens, context_ids = sentence_to_token_ids(context_line, word2id)
qn_tokens, qn_ids = sentence_to_token_ids(qn_line, word2id)
ans_span = intstr_to_intlist(ans_line)
context_tokens_char = deepcopy(context_tokens)
qn_tokens_char = deepcopy(qn_tokens)
# read the next line from each file
context_line, qn_line, ans_line = context_file.readline(), qn_file.readline(), ans_file.readline()
# get ans_tokens from ans_span
assert len(ans_span) == 2
if ans_span[1] < ans_span[0]:
print ("Found an ill-formed gold span: start=%i end=%i" % (ans_span[0], ans_span[1]))
continue
ans_tokens = context_tokens[ans_span[0] : ans_span[1]+1] # list of strings
# discard or truncate too-long questions
if len(qn_ids) > question_len:
if discard_long:
continue
else: # truncate
qn_ids = qn_ids[:question_len]
qn_tokens_char = qn_tokens_char[:question_len]
# discard or truncate too-long contexts
if len(context_ids) > context_len:
if discard_long:
continue
else: # truncate
context_ids = context_ids[:context_len]
context_tokens_char = context_tokens_char[:context_len]
# add to examples
# NOTE: Change
context_pos_id = get_pos_ne_id(context_pos_line)
context_ne_id = get_pos_ne_id(context_ne_line)
qn_pos_id = get_pos_ne_id(qn_pos_line)
qn_ne_id = get_pos_ne_id(qn_ne_line)
context_em = get_em(context_tokens_char, qn_tokens_char)
examples.append((context_ids, context_tokens, qn_ids, qn_tokens, ans_span, ans_tokens, context_tokens_char, qn_tokens_char, context_pos_id, qn_pos_id, context_ne_id, qn_ne_id, context_em))
# stop refilling if you have 160 batches
if len(examples) == batch_size * 160:
break
# Once you've either got 160 batches or you've reached end of file:
# Sort by question length
# Note: if you sort by context length, then you'll have batches which contain the same context many times (because each context appears several times, with different questions)
examples = sorted(examples, key=lambda e: len(e[2]))
# Make into batches and append to the list batches
for batch_start in range(0, len(examples), batch_size):
# Note: each of these is a list length batch_size of lists of ints (except on last iter when it might be less than batch_size)
context_ids_batch, context_tokens_batch, qn_ids_batch, qn_tokens_batch, ans_span_batch, ans_tokens_batch, context_tokens_char_batch, qn_tokens_char_batch, context_pos_id_batch, qn_pos_id_batch, context_ne_id_batch, qn_ne_id_batch, context_em_batch= list(zip(*examples[batch_start:batch_start+batch_size]))
# NOTE: Change
context_char_batch = batcher.batch_sentences(context_tokens_char_batch, context_len) # already padded
qn_char_batch = batcher.batch_sentences(qn_tokens_char_batch, question_len) # already padded
batches.append((context_ids_batch, context_tokens_batch, qn_ids_batch, qn_tokens_batch, ans_span_batch, ans_tokens_batch, context_char_batch, qn_char_batch,context_pos_id_batch, qn_pos_id_batch, context_ne_id_batch, qn_ne_id_batch, context_em_batch))
# shuffle the batches
random.shuffle(batches)
toc = time.time()
print ("Refilling batches took %.2f seconds" % (toc-tic))
return
def get_batch_generator(word2id, context_path, qn_path, ans_path, context_pos_path, qn_pos_path, context_ne_path, qn_ne_path, batch_size, context_len, question_len, discard_long, batcher):
"""
This function returns a generator object that yields batches.
The last batch in the dataset will be a partial batch.
Read this to understand generators and the yield keyword in Python: https://stackoverflow.com/questions/231767/what-does-the-yield-keyword-do
Inputs:
word2id: dictionary mapping word (string) to word id (int)
context_file, qn_file, ans_file: paths to {train/dev}.{context/question/answer} data files
batch_size: int. how big to make the batches
context_len, question_len: max length of context and question respectively
discard_long: If True, discard any examples that are longer than context_len or question_len.
If False, truncate those exmaples instead.
"""
context_file, qn_file, ans_file = open(context_path), open(qn_path), open(ans_path)
context_pos_file, qn_pos_file, context_ne_file, qn_ne_file = open(context_pos_path), open(qn_pos_path), open(context_ne_path), open(qn_ne_path)
batches = []
while True:
if len(batches) == 0: # add more batches
refill_batches(batches, word2id, context_file, qn_file, ans_file, context_pos_file, qn_pos_file, context_ne_file, qn_ne_file, batch_size, context_len, question_len, discard_long, batcher)
if len(batches) == 0:
break
# NOTE: CHANGE
# Get next batch. These are all lists length batch_size
(context_ids, context_tokens, qn_ids, qn_tokens, ans_span, ans_tokens, context_char, qn_char, context_pos_ids, qn_pos_ids, context_ne_ids, qn_ne_ids, context_em) = batches.pop(0)
# Pad context_ids and qn_ids
qn_ids = padded(qn_ids, question_len) # pad questions to length question_len
context_ids = padded(context_ids, context_len) # pad contexts to length context_len
context_pos_ids = padded(context_pos_ids, context_len)
context_ne_ids = padded(context_ne_ids, context_len)
qn_pos_ids = padded(qn_pos_ids, question_len)
qn_ne_ids = padded(qn_ne_ids, question_len)
context_em = padded(qn_ne_ids, context_len)
# Make qn_ids into a np array and create qn_mask
qn_ids = np.array(qn_ids) # shape (question_len, batch_size)
qn_mask = (qn_ids != PAD_ID).astype(np.int32) # shape (question_len, batch_size)
qn_pos_ids = np.array(qn_pos_ids)
qn_ne_ids = np.array(qn_ne_ids)
# Make context_ids into a np array and create context_mask
context_ids = np.array(context_ids) # shape (context_len, batch_size)
context_mask = (context_ids != PAD_ID).astype(np.int32) # shape (context_len, batch_size)
context_pos_ids = np.array(context_pos_ids)
context_ne_ids = np.array(context_ne_ids)
context_em = np.array(context_em)
# Make ans_span into a np array
ans_span = np.array(ans_span) # shape (batch_size, 2)
# Make into a Batch object
batch = Batch(context_ids, context_mask, context_tokens, qn_ids, qn_mask, qn_tokens, ans_span, ans_tokens, context_char, qn_char, context_pos_ids, qn_pos_ids, context_ne_ids, qn_ne_ids, context_em)
yield batch
return
| en | 0.825216 | # Copyright 2018 Stanford University # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. This file contains code to read tokenized data from file, truncate, pad and process it into batches ready for training A class to hold the information needed for a training batch Inputs: {context/qn}_ids: Numpy arrays. Shape (batch_size, {context_len/question_len}). Contains padding. {context/qn}_mask: Numpy arrays, same shape as _ids. Contains 1s where there is real data, 0s where there is padding. {context/qn/ans}_tokens: Lists length batch_size, containing lists (unpadded) of tokens (strings) ans_span: numpy array, shape (batch_size, 2) uuid: a list (length batch_size) of strings. Not needed for training. Used by official_eval mode. Given a string e.g. '311 9 1334 635 6192 56 639', returns as a list of integers Turns an already-tokenized sentence string into word indices e.g. "i do n't know" -> [9, 32, 16, 96] Note any token that isn't in the word2id mapping gets mapped to the id for UNK # list of strings # NOTE: CHANGE Turns an already-tokenized sentence string into word indices e.g. "i do n't know" -> [9, 32, 16, 96] Note any token that isn't in the word2id mapping gets mapped to the id for UNK Inputs: token_batch: List (length batch size) of lists of ints. batch_pad: Int. Length to pad to. If 0, pad to maximum length sequence in token_batch. Returns: List (length batch_size) of padded of lists of ints. 
All are same length - batch_pad if batch_pad!=0, otherwise the maximum length in token_batch Adds more batches into the "batches" list. Inputs: batches: list to add batches to word2id: dictionary mapping word (string) to word id (int) context_file, qn_file, ans_file: paths to {train/dev}.{context/question/answer} data files batch_size: int. how big to make the batches context_len, question_len: max length of context and question respectively discard_long: If True, discard any examples that are longer than context_len or question_len. If False, truncate those exmaples instead. # list of (qn_ids, context_ids, ans_span, ans_tokens) triples # read the next line from each # while you haven't reached the end # Convert tokens to word ids # read the next line from each file # get ans_tokens from ans_span # list of strings # discard or truncate too-long questions # truncate # discard or truncate too-long contexts # truncate # add to examples # NOTE: Change # stop refilling if you have 160 batches # Once you've either got 160 batches or you've reached end of file: # Sort by question length # Note: if you sort by context length, then you'll have batches which contain the same context many times (because each context appears several times, with different questions) # Make into batches and append to the list batches # Note: each of these is a list length batch_size of lists of ints (except on last iter when it might be less than batch_size) # NOTE: Change # already padded # already padded # shuffle the batches This function returns a generator object that yields batches. The last batch in the dataset will be a partial batch. Read this to understand generators and the yield keyword in Python: https://stackoverflow.com/questions/231767/what-does-the-yield-keyword-do Inputs: word2id: dictionary mapping word (string) to word id (int) context_file, qn_file, ans_file: paths to {train/dev}.{context/question/answer} data files batch_size: int. 
how big to make the batches context_len, question_len: max length of context and question respectively discard_long: If True, discard any examples that are longer than context_len or question_len. If False, truncate those exmaples instead. # add more batches # NOTE: CHANGE # Get next batch. These are all lists length batch_size # Pad context_ids and qn_ids # pad questions to length question_len # pad contexts to length context_len # Make qn_ids into a np array and create qn_mask # shape (question_len, batch_size) # shape (question_len, batch_size) # Make context_ids into a np array and create context_mask # shape (context_len, batch_size) # shape (context_len, batch_size) # Make ans_span into a np array # shape (batch_size, 2) # Make into a Batch object | 2.527918 | 3 |
cookiecutter-strapi/hooks/post_gen_project.py | link2cory/cookiecutter-react-collection | 0 | 6612335 | <filename>cookiecutter-strapi/hooks/post_gen_project.py<gh_stars>0
import os, subprocess, sys
sys.path.insert(0, "{{ cookiecutter.project_root }}")
from common.post_gen_hook import main
# cookiecutter jinja2 obj is extracted as an OrderedDict
from collections import OrderedDict
main({{ cookiecutter }})
| <filename>cookiecutter-strapi/hooks/post_gen_project.py<gh_stars>0
import os, subprocess, sys
sys.path.insert(0, "{{ cookiecutter.project_root }}")
from common.post_gen_hook import main
# cookiecutter jinja2 obj is extracted as an OrderedDict
from collections import OrderedDict
main({{ cookiecutter }})
| en | 0.917909 | # cookiecutter jinja2 obj is extracted as an OrderedDict | 1.533653 | 2 |
SPIB_training.py | tiwarylab/State-Predictive-Information-Bottleneck | 4 | 6612336 | """
SPIB: A deep learning-based framework to learn RCs
from MD trajectories. Code maintained by Dedi.
Read and cite the following when using this method:
https://aip.scitation.org/doi/abs/10.1063/5.0038198
"""
import torch
import numpy as np
import time
import os
# Data Processing
# ------------------------------------------------------------------------------
def data_init(t0, dt, traj_data, traj_label, traj_weights):
    """Build time-lagged (past, future) pairs and split them 90/10 into train/test.

    Each sample pairs frame ``i`` (past) with frame ``i + dt`` (future); the
    first ``t0`` frames are discarded as equilibration. The pairing is followed
    by a single random permutation shared by data, labels and weights, so the
    correspondence between past/future/label/weight rows is preserved.

    Args:
        t0: number of initial frames to skip.
        dt: time delay (in frames) between past and future samples.
        traj_data: trajectory features, indexable array/tensor of shape (T, ...).
        traj_label: per-frame labels, same leading length as ``traj_data``.
        traj_weights: optional per-frame reweighting factors (e.g. from biased
            sampling), or None for unweighted data.

    Returns:
        Tuple of (data_shape, past_train, future_train, label_train,
        weights_train, past_test, future_test, label_test, weights_test).
        The weight entries are None when ``traj_weights`` is None.
    """
    assert len(traj_data) == len(traj_label)

    # Skip the first t0 frames; each past frame i is paired with frame i + dt.
    past_data = traj_data[t0:(len(traj_data) - dt)]
    future_data = traj_data[(t0 + dt):len(traj_data)]
    label = traj_label[(t0 + dt):len(traj_data)]

    # Shape of one sample (everything after the time axis).
    data_shape = past_data.shape[1:]

    n_data = len(past_data)

    # 90% random train / 10% test split; one shared permutation keeps
    # past/future/label/weight rows aligned.
    p = np.random.permutation(n_data)
    past_data = past_data[p]
    future_data = future_data[p]
    label = label[p]

    past_data_train = past_data[0: (9 * n_data) // 10]
    past_data_test = past_data[(9 * n_data) // 10:]

    future_data_train = future_data[0: (9 * n_data) // 10]
    future_data_test = future_data[(9 * n_data) // 10:]

    label_train = label[0: (9 * n_data) // 10]
    label_test = label[(9 * n_data) // 10:]

    # Identity check (`is not None`) instead of `!= None`: comparing a
    # tensor/array to None with `!=` invokes elementwise comparison semantics.
    if traj_weights is not None:
        assert len(traj_data) == len(traj_weights)
        weights = traj_weights[t0:(len(traj_data) - dt)]
        weights = weights[p]
        weights_train = weights[0: (9 * n_data) // 10]
        weights_test = weights[(9 * n_data) // 10:]
    else:
        weights_train = None
        weights_test = None

    return data_shape, past_data_train, future_data_train, label_train, weights_train,\
        past_data_test, future_data_test, label_test, weights_test
# Loss function
# ------------------------------------------------------------------------------
def calculate_loss(IB, data_inputs, data_targets, data_weights, beta=1.0):
    """Compute the information-bottleneck loss for one minibatch.

    Loss = cross-entropy reconstruction error + beta * KL divergence, where the
    KL term is estimated as E[log q(z|x) - log p(z)] from a single latent
    sample. When per-sample weights are provided, both terms are reweighted
    (e.g. to undo bias from enhanced sampling).

    Args:
        IB: model exposing ``forward(x) -> (outputs, z_sample, z_mean,
            z_logvar)`` with ``outputs`` as log-probabilities, and
            ``log_p(z) -> log prior density``.
        data_inputs: minibatch of past configurations.
        data_targets: one-hot (or soft) target state labels.
        data_weights: per-sample weights, or None for unweighted data.
        beta: weight of the KL term.

    Returns:
        Tuple (loss, reconstruction_error, kl_loss).
    """
    # Pass through the encoder/decoder.
    outputs, z_sample, z_mean, z_logvar = IB.forward(data_inputs)

    # log p(z): prior density at the sampled latent.
    log_p = IB.log_p(z_sample)
    # log q(z|x): Gaussian posterior density at the sample (constant dropped).
    log_q = -0.5 * torch.sum(z_logvar + torch.pow(z_sample - z_mean, 2)
                             / torch.exp(z_logvar), dim=1)

    # Identity check (`is None`) instead of `== None`: `==` on a tensor
    # invokes elementwise comparison semantics.
    if data_weights is None:
        # Reconstruction loss is cross-entropy (outputs are log-probabilities).
        reconstruction_error = torch.mean(torch.sum(-data_targets * outputs, dim=1))
        # KL divergence estimated from the single sample.
        kl_loss = torch.mean(log_q - log_p)
    else:
        # Same two terms, reweighted per sample.
        reconstruction_error = torch.mean(data_weights * torch.sum(-data_targets * outputs, dim=1))
        kl_loss = torch.mean(data_weights * (log_q - log_p))

    loss = reconstruction_error + beta * kl_loss

    return loss, reconstruction_error.float(), kl_loss.float()
# Train and test model
# ------------------------------------------------------------------------------
def sample_minibatch(past_data, data_labels, data_weights, indices, device):
    """Gather one minibatch by index and move it to the target device.

    Args:
        past_data: full tensor of input configurations.
        data_labels: full tensor of target labels, aligned with ``past_data``.
        data_weights: full tensor of per-sample weights, or None.
        indices: indices of the samples in this minibatch.
        device: torch device (or device string) to move the batch to.

    Returns:
        Tuple (batch_inputs, batch_labels, batch_weights); batch_weights is
        None when ``data_weights`` is None.
    """
    sample_past_data = past_data[indices].to(device)
    sample_data_labels = data_labels[indices].to(device)

    # Identity check (`is None`) instead of `== None`: `==` on a tensor
    # invokes elementwise comparison semantics.
    if data_weights is None:
        sample_data_weights = None
    else:
        sample_data_weights = data_weights[indices].to(device)

    return sample_past_data, sample_data_labels, sample_data_weights
def train(IB, beta, train_past_data, train_future_data, init_train_data_labels, train_data_weights, \
test_past_data, test_future_data, init_test_data_labels, test_data_weights, \
optimizer, scheduler, batch_size, threshold, patience, refinements, output_path, log_interval, device, index):
IB.train()
step = 0
start = time.time()
log_path = output_path + '_train.log'
os.makedirs(os.path.dirname(log_path), exist_ok=True)
IB_path = output_path + "cpt" + str(index) + "/IB"
os.makedirs(os.path.dirname(IB_path), exist_ok=True)
train_data_labels = init_train_data_labels
test_data_labels = init_test_data_labels
update_times = 0
unchanged_epochs = 0
epoch = 0
# initial state population
state_population0 = torch.sum(train_data_labels,dim=0).float()/train_data_labels.shape[0]
# record the default optimizer state
initial_opt_state_dict = scheduler.optimizer.state_dict()
while True:
train_permutation = torch.randperm(len(train_past_data))
test_permutation = torch.randperm(len(test_past_data))
for i in range(0, len(train_past_data), batch_size):
step += 1
if i+batch_size>len(train_past_data):
break
train_indices = train_permutation[i:i+batch_size]
batch_inputs, batch_outputs, batch_weights = sample_minibatch(train_past_data, train_data_labels, \
train_data_weights, train_indices, device)
loss, reconstruction_error, kl_loss= calculate_loss(IB, batch_inputs, \
batch_outputs, batch_weights, beta)
# Stop if NaN is obtained
if(torch.isnan(loss).any()):
return True
optimizer.zero_grad()
loss.backward(retain_graph=True)
optimizer.step()
if step % 500 == 0:
with torch.no_grad():
batch_inputs, batch_outputs, batch_weights = sample_minibatch(train_past_data, train_data_labels, \
train_data_weights, train_indices, device)
loss, reconstruction_error, kl_loss= calculate_loss(IB, batch_inputs, \
batch_outputs, batch_weights, beta)
train_time = time.time() - start
print(
"Iteration %i:\tTime %f s\nLoss (train) %f\tKL loss (train): %f\n"
"Reconstruction loss (train) %f" % (
step, train_time, loss, kl_loss, reconstruction_error))
print(
"Iteration %i:\tTime %f s\nLoss (train) %f\tKL loss (train): %f\n"
"Reconstruction loss (train) %f" % (
step, train_time, loss, kl_loss, reconstruction_error), file=open(log_path, 'a'))
j=i%len(test_permutation)
test_indices = test_permutation[j:j+batch_size]
batch_inputs, batch_outputs, batch_weights = sample_minibatch(test_past_data, test_data_labels, \
test_data_weights, test_indices, device)
loss, reconstruction_error, kl_loss = calculate_loss(IB, batch_inputs, \
batch_outputs, batch_weights, beta)
train_time = time.time() - start
print(
"Loss (test) %f\tKL loss (test): %f\n"
"Reconstruction loss (test) %f" % (
loss, kl_loss, reconstruction_error))
print(
"Loss (test) %f\tKL loss (test): %f\n"
"Reconstruction loss (test) %f" % (
loss, kl_loss, reconstruction_error), file=open(log_path, 'a'))
if step % log_interval == 0:
# save model
torch.save({'step': step,
'state_dict': IB.state_dict()},
IB_path+ '_%d_cpt.pt'%step)
torch.save({'optimizer': optimizer.state_dict()},
IB_path+ '_%d_optim_cpt.pt'%step)
epoch+=1
# check convergence
new_train_data_labels = IB.update_labels(train_future_data, batch_size)
# save the state population
state_population = torch.sum(new_train_data_labels,dim=0).float()/new_train_data_labels.shape[0]
print(state_population)
print(state_population, file=open(log_path, 'a'))
# print the state population change
state_population_change = torch.sqrt(torch.square(state_population-state_population0).sum())
print('State population change=%f'%state_population_change)
print('State population change=%f'%state_population_change, file=open(log_path, 'a'))
# update state_population
state_population0 = state_population
scheduler.step()
if scheduler.gamma < 1:
print("Update lr to %f"%(optimizer.param_groups[0]['lr']))
print("Update lr to %f"%(optimizer.param_groups[0]['lr']), file=open(log_path, 'a'))
# check whether the change of the state population is smaller than the threshold
if state_population_change < threshold:
unchanged_epochs += 1
if unchanged_epochs > patience:
# check whether only one state is found
if torch.sum(state_population>0)<2:
print("Only one metastable state is found!")
break
# Stop only if update_times >= refinements
if IB.UpdateLabel and update_times < refinements:
train_data_labels = new_train_data_labels
test_data_labels = IB.update_labels(test_future_data, batch_size)
update_times+=1
print("Update %d\n"%(update_times))
print("Update %d\n"%(update_times), file=open(log_path, 'a'))
# reset epoch and unchanged_epochs
epoch = 0
unchanged_epochs = 0
# reset the representative-inputs
representative_inputs = IB.estimatate_representative_inputs(train_past_data, train_data_weights, batch_size)
IB.reset_representative(representative_inputs.to(device))
# reset the optimizer and scheduler
scheduler.optimizer.load_state_dict(initial_opt_state_dict)
scheduler.last_epoch = -1
else:
break
else:
unchanged_epochs = 0
print("Epoch: %d\n"%(epoch))
print("Epoch: %d\n"%(epoch), file=open(log_path, 'a'))
# output the saving path
total_training_time = time.time() - start
print("Total training time: %f" % total_training_time)
print("Total training time: %f" % total_training_time, file=open(log_path, 'a'))
# save model
torch.save({'step': step,
'state_dict': IB.state_dict()},
IB_path+ '_%d_cpt.pt'%step)
torch.save({'optimizer': optimizer.state_dict()},
IB_path+ '_%d_optim_cpt.pt'%step)
torch.save({'step': step,
'state_dict': IB.state_dict()},
IB_path+ '_final_cpt.pt')
torch.save({'optimizer': optimizer.state_dict()},
IB_path+ '_final_optim_cpt.pt')
return False
@torch.no_grad()
def output_final_result(IB, device, train_past_data, train_future_data, train_data_labels, train_data_weights, \
test_past_data, test_future_data, test_data_labels, test_data_weights, batch_size, output_path, \
path, dt, beta, learning_rate, index=0):
with torch.no_grad():
final_result_path = output_path + '_final_result' + str(index) + '.npy'
os.makedirs(os.path.dirname(final_result_path), exist_ok=True)
# label update
if IB.UpdateLabel:
train_data_labels = IB.update_labels(train_future_data, batch_size)
test_data_labels = IB.update_labels(test_future_data, batch_size)
final_result = []
# output the result
loss, reconstruction_error, kl_loss= [0 for i in range(3)]
for i in range(0, len(train_past_data), batch_size):
batch_inputs, batch_outputs, batch_weights = sample_minibatch(train_past_data, train_data_labels, train_data_weights, \
range(i,min(i+batch_size,len(train_past_data))), IB.device)
loss1, reconstruction_error1, kl_loss1 = calculate_loss(IB, batch_inputs, batch_outputs, \
batch_weights, beta)
loss += loss1*len(batch_inputs)
reconstruction_error += reconstruction_error1*len(batch_inputs)
kl_loss += kl_loss1*len(batch_inputs)
# output the result
loss/=len(train_past_data)
reconstruction_error/=len(train_past_data)
kl_loss/=len(train_past_data)
final_result += [loss.data.cpu().numpy(), reconstruction_error.cpu().data.numpy(), kl_loss.cpu().data.numpy()]
print(
"Final: %d\nLoss (train) %f\tKL loss (train): %f\n"
"Reconstruction loss (train) %f" % (
index, loss, kl_loss, reconstruction_error))
print(
"Final: %d\nLoss (train) %f\tKL loss (train): %f\n"
"Reconstruction loss (train) %f" % (
index, loss, kl_loss, reconstruction_error),
file=open(path, 'a'))
loss, reconstruction_error, kl_loss = [0 for i in range(3)]
for i in range(0, len(test_past_data), batch_size):
batch_inputs, batch_outputs, batch_weights = sample_minibatch(test_past_data, test_data_labels, test_data_weights, \
range(i,min(i+batch_size,len(test_past_data))), IB.device)
loss1, reconstruction_error1, kl_loss1 = calculate_loss(IB, batch_inputs, batch_outputs, \
batch_weights, beta)
loss += loss1*len(batch_inputs)
reconstruction_error += reconstruction_error1*len(batch_inputs)
kl_loss += kl_loss1*len(batch_inputs)
# output the result
loss/=len(test_past_data)
reconstruction_error/=len(test_past_data)
kl_loss/=len(test_past_data)
final_result += [loss.cpu().data.numpy(), reconstruction_error.cpu().data.numpy(), kl_loss.cpu().data.numpy()]
print(
"Loss (test) %f\tKL loss (train): %f\n"
"Reconstruction loss (test) %f"
% (loss, kl_loss, reconstruction_error))
print(
"Loss (test) %f\tKL loss (train): %f\n"
"Reconstruction loss (test) %f"
% (loss, kl_loss, reconstruction_error), file=open(path, 'a'))
print("dt: %d\t Beta: %f\t Learning_rate: %f" % (
dt, beta, learning_rate))
print("dt: %d\t Beta: %f\t Learning_rate: %f" % (
dt, beta, learning_rate),
file=open(path, 'a'))
final_result = np.array(final_result)
np.save(final_result_path, final_result)
| """
SPIB: A deep learning-based framework to learn RCs
from MD trajectories. Code maintained by Dedi.
Read and cite the following when using this method:
https://aip.scitation.org/doi/abs/10.1063/5.0038198
"""
import torch
import numpy as np
import time
import os
# Data Processing
# ------------------------------------------------------------------------------
def data_init(t0, dt, traj_data, traj_label, traj_weights):
assert len(traj_data)==len(traj_label)
# skip the first t0 data
past_data = traj_data[t0:(len(traj_data)-dt)]
future_data = traj_data[(t0+dt):len(traj_data)]
label = traj_label[(t0+dt):len(traj_data)]
# data shape
data_shape = past_data.shape[1:]
n_data = len(past_data)
# 90% random test/train split
p = np.random.permutation(n_data)
past_data = past_data[p]
future_data = future_data[p]
label = label[p]
past_data_train = past_data[0: (9 * n_data) // 10]
past_data_test = past_data[(9 * n_data) // 10:]
future_data_train = future_data[0: (9 * n_data) // 10]
future_data_test = future_data[(9 * n_data) // 10:]
label_train = label[0: (9 * n_data) // 10]
label_test = label[(9 * n_data) // 10:]
if traj_weights != None:
assert len(traj_data)==len(traj_weights)
weights = traj_weights[t0:(len(traj_data)-dt)]
weights = weights[p]
weights_train = weights[0: (9 * n_data) // 10]
weights_test = weights[(9 * n_data) // 10:]
else:
weights_train = None
weights_test = None
return data_shape, past_data_train, future_data_train, label_train, weights_train,\
past_data_test, future_data_test, label_test, weights_test
# Loss function
# ------------------------------------------------------------------------------
def calculate_loss(IB, data_inputs, data_targets, data_weights, beta=1.0):
# pass through VAE
outputs, z_sample, z_mean, z_logvar = IB.forward(data_inputs)
# KL Divergence
log_p = IB.log_p(z_sample)
log_q = -0.5 * torch.sum(z_logvar + torch.pow(z_sample-z_mean, 2)
/torch.exp(z_logvar), dim=1)
if data_weights == None:
# Reconstruction loss is cross-entropy
reconstruction_error = torch.mean(torch.sum(-data_targets*outputs, dim=1))
# KL Divergence
kl_loss = torch.mean(log_q-log_p)
else:
# Reconstruction loss is cross-entropy
# reweighed
reconstruction_error = torch.mean(data_weights*torch.sum(-data_targets*outputs, dim=1))
# KL Divergence
kl_loss = torch.mean(data_weights*(log_q-log_p))
loss = reconstruction_error + beta*kl_loss
return loss, reconstruction_error.float(), kl_loss.float()
# Train and test model
# ------------------------------------------------------------------------------
def sample_minibatch(past_data, data_labels, data_weights, indices, device):
sample_past_data = past_data[indices].to(device)
sample_data_labels = data_labels[indices].to(device)
if data_weights == None:
sample_data_weights = None
else:
sample_data_weights = data_weights[indices].to(device)
return sample_past_data, sample_data_labels, sample_data_weights
def train(IB, beta, train_past_data, train_future_data, init_train_data_labels, train_data_weights, \
test_past_data, test_future_data, init_test_data_labels, test_data_weights, \
optimizer, scheduler, batch_size, threshold, patience, refinements, output_path, log_interval, device, index):
IB.train()
step = 0
start = time.time()
log_path = output_path + '_train.log'
os.makedirs(os.path.dirname(log_path), exist_ok=True)
IB_path = output_path + "cpt" + str(index) + "/IB"
os.makedirs(os.path.dirname(IB_path), exist_ok=True)
train_data_labels = init_train_data_labels
test_data_labels = init_test_data_labels
update_times = 0
unchanged_epochs = 0
epoch = 0
# initial state population
state_population0 = torch.sum(train_data_labels,dim=0).float()/train_data_labels.shape[0]
# record the default optimizer state
initial_opt_state_dict = scheduler.optimizer.state_dict()
while True:
train_permutation = torch.randperm(len(train_past_data))
test_permutation = torch.randperm(len(test_past_data))
for i in range(0, len(train_past_data), batch_size):
step += 1
if i+batch_size>len(train_past_data):
break
train_indices = train_permutation[i:i+batch_size]
batch_inputs, batch_outputs, batch_weights = sample_minibatch(train_past_data, train_data_labels, \
train_data_weights, train_indices, device)
loss, reconstruction_error, kl_loss= calculate_loss(IB, batch_inputs, \
batch_outputs, batch_weights, beta)
# Stop if NaN is obtained
if(torch.isnan(loss).any()):
return True
optimizer.zero_grad()
loss.backward(retain_graph=True)
optimizer.step()
if step % 500 == 0:
with torch.no_grad():
batch_inputs, batch_outputs, batch_weights = sample_minibatch(train_past_data, train_data_labels, \
train_data_weights, train_indices, device)
loss, reconstruction_error, kl_loss= calculate_loss(IB, batch_inputs, \
batch_outputs, batch_weights, beta)
train_time = time.time() - start
print(
"Iteration %i:\tTime %f s\nLoss (train) %f\tKL loss (train): %f\n"
"Reconstruction loss (train) %f" % (
step, train_time, loss, kl_loss, reconstruction_error))
print(
"Iteration %i:\tTime %f s\nLoss (train) %f\tKL loss (train): %f\n"
"Reconstruction loss (train) %f" % (
step, train_time, loss, kl_loss, reconstruction_error), file=open(log_path, 'a'))
j=i%len(test_permutation)
test_indices = test_permutation[j:j+batch_size]
batch_inputs, batch_outputs, batch_weights = sample_minibatch(test_past_data, test_data_labels, \
test_data_weights, test_indices, device)
loss, reconstruction_error, kl_loss = calculate_loss(IB, batch_inputs, \
batch_outputs, batch_weights, beta)
train_time = time.time() - start
print(
"Loss (test) %f\tKL loss (test): %f\n"
"Reconstruction loss (test) %f" % (
loss, kl_loss, reconstruction_error))
print(
"Loss (test) %f\tKL loss (test): %f\n"
"Reconstruction loss (test) %f" % (
loss, kl_loss, reconstruction_error), file=open(log_path, 'a'))
if step % log_interval == 0:
# save model
torch.save({'step': step,
'state_dict': IB.state_dict()},
IB_path+ '_%d_cpt.pt'%step)
torch.save({'optimizer': optimizer.state_dict()},
IB_path+ '_%d_optim_cpt.pt'%step)
epoch+=1
# check convergence
new_train_data_labels = IB.update_labels(train_future_data, batch_size)
# save the state population
state_population = torch.sum(new_train_data_labels,dim=0).float()/new_train_data_labels.shape[0]
print(state_population)
print(state_population, file=open(log_path, 'a'))
# print the state population change
state_population_change = torch.sqrt(torch.square(state_population-state_population0).sum())
print('State population change=%f'%state_population_change)
print('State population change=%f'%state_population_change, file=open(log_path, 'a'))
# update state_population
state_population0 = state_population
scheduler.step()
if scheduler.gamma < 1:
print("Update lr to %f"%(optimizer.param_groups[0]['lr']))
print("Update lr to %f"%(optimizer.param_groups[0]['lr']), file=open(log_path, 'a'))
# check whether the change of the state population is smaller than the threshold
if state_population_change < threshold:
unchanged_epochs += 1
if unchanged_epochs > patience:
# check whether only one state is found
if torch.sum(state_population>0)<2:
print("Only one metastable state is found!")
break
# Stop only if update_times >= refinements
if IB.UpdateLabel and update_times < refinements:
train_data_labels = new_train_data_labels
test_data_labels = IB.update_labels(test_future_data, batch_size)
update_times+=1
print("Update %d\n"%(update_times))
print("Update %d\n"%(update_times), file=open(log_path, 'a'))
# reset epoch and unchanged_epochs
epoch = 0
unchanged_epochs = 0
# reset the representative-inputs
representative_inputs = IB.estimatate_representative_inputs(train_past_data, train_data_weights, batch_size)
IB.reset_representative(representative_inputs.to(device))
# reset the optimizer and scheduler
scheduler.optimizer.load_state_dict(initial_opt_state_dict)
scheduler.last_epoch = -1
else:
break
else:
unchanged_epochs = 0
print("Epoch: %d\n"%(epoch))
print("Epoch: %d\n"%(epoch), file=open(log_path, 'a'))
# output the saving path
total_training_time = time.time() - start
print("Total training time: %f" % total_training_time)
print("Total training time: %f" % total_training_time, file=open(log_path, 'a'))
# save model
torch.save({'step': step,
'state_dict': IB.state_dict()},
IB_path+ '_%d_cpt.pt'%step)
torch.save({'optimizer': optimizer.state_dict()},
IB_path+ '_%d_optim_cpt.pt'%step)
torch.save({'step': step,
'state_dict': IB.state_dict()},
IB_path+ '_final_cpt.pt')
torch.save({'optimizer': optimizer.state_dict()},
IB_path+ '_final_optim_cpt.pt')
return False
@torch.no_grad()
def output_final_result(IB, device, train_past_data, train_future_data, train_data_labels, train_data_weights, \
test_past_data, test_future_data, test_data_labels, test_data_weights, batch_size, output_path, \
path, dt, beta, learning_rate, index=0):
with torch.no_grad():
final_result_path = output_path + '_final_result' + str(index) + '.npy'
os.makedirs(os.path.dirname(final_result_path), exist_ok=True)
# label update
if IB.UpdateLabel:
train_data_labels = IB.update_labels(train_future_data, batch_size)
test_data_labels = IB.update_labels(test_future_data, batch_size)
final_result = []
# output the result
loss, reconstruction_error, kl_loss= [0 for i in range(3)]
for i in range(0, len(train_past_data), batch_size):
batch_inputs, batch_outputs, batch_weights = sample_minibatch(train_past_data, train_data_labels, train_data_weights, \
range(i,min(i+batch_size,len(train_past_data))), IB.device)
loss1, reconstruction_error1, kl_loss1 = calculate_loss(IB, batch_inputs, batch_outputs, \
batch_weights, beta)
loss += loss1*len(batch_inputs)
reconstruction_error += reconstruction_error1*len(batch_inputs)
kl_loss += kl_loss1*len(batch_inputs)
# output the result
loss/=len(train_past_data)
reconstruction_error/=len(train_past_data)
kl_loss/=len(train_past_data)
final_result += [loss.data.cpu().numpy(), reconstruction_error.cpu().data.numpy(), kl_loss.cpu().data.numpy()]
print(
"Final: %d\nLoss (train) %f\tKL loss (train): %f\n"
"Reconstruction loss (train) %f" % (
index, loss, kl_loss, reconstruction_error))
print(
"Final: %d\nLoss (train) %f\tKL loss (train): %f\n"
"Reconstruction loss (train) %f" % (
index, loss, kl_loss, reconstruction_error),
file=open(path, 'a'))
loss, reconstruction_error, kl_loss = [0 for i in range(3)]
for i in range(0, len(test_past_data), batch_size):
batch_inputs, batch_outputs, batch_weights = sample_minibatch(test_past_data, test_data_labels, test_data_weights, \
range(i,min(i+batch_size,len(test_past_data))), IB.device)
loss1, reconstruction_error1, kl_loss1 = calculate_loss(IB, batch_inputs, batch_outputs, \
batch_weights, beta)
loss += loss1*len(batch_inputs)
reconstruction_error += reconstruction_error1*len(batch_inputs)
kl_loss += kl_loss1*len(batch_inputs)
# output the result
loss/=len(test_past_data)
reconstruction_error/=len(test_past_data)
kl_loss/=len(test_past_data)
final_result += [loss.cpu().data.numpy(), reconstruction_error.cpu().data.numpy(), kl_loss.cpu().data.numpy()]
print(
"Loss (test) %f\tKL loss (train): %f\n"
"Reconstruction loss (test) %f"
% (loss, kl_loss, reconstruction_error))
print(
"Loss (test) %f\tKL loss (train): %f\n"
"Reconstruction loss (test) %f"
% (loss, kl_loss, reconstruction_error), file=open(path, 'a'))
print("dt: %d\t Beta: %f\t Learning_rate: %f" % (
dt, beta, learning_rate))
print("dt: %d\t Beta: %f\t Learning_rate: %f" % (
dt, beta, learning_rate),
file=open(path, 'a'))
final_result = np.array(final_result)
np.save(final_result_path, final_result)
| en | 0.60226 | SPIB: A deep learning-based framework to learn RCs
from MD trajectories. Code maintained by Dedi.
Read and cite the following when using this method:
https://aip.scitation.org/doi/abs/10.1063/5.0038198 # Data Processing # ------------------------------------------------------------------------------ # skip the first t0 data # data shape # 90% random test/train split # Loss function # ------------------------------------------------------------------------------ # pass through VAE # KL Divergence # Reconstruction loss is cross-entropy # KL Divergence # Reconstruction loss is cross-entropy # reweighed # KL Divergence # Train and test model # ------------------------------------------------------------------------------ # initial state population # record the default optimizer state # Stop if NaN is obtained # save model # check convergence # save the state population # print the state population change # update state_population # check whether the change of the state population is smaller than the threshold # check whether only one state is found # Stop only if update_times >= refinements # reset epoch and unchanged_epochs # reset the representative-inputs # reset the optimizer and scheduler # output the saving path # save model # label update # output the result # output the result # output the result | 2.531393 | 3 |
ia870/iaclose.py | andreperesnl/ia870 | 5 | 6612337 | # -*- encoding: utf-8 -*-
# Module iaclose
from numpy import *
def iaclose(f, b=None):
from iaero import iaero
from iadil import iadil
from iasecross import iasecross
if b is None:
b = iasecross()
y = iaero( iadil(f,b),b)
return y
| # -*- encoding: utf-8 -*-
# Module iaclose
from numpy import *
def iaclose(f, b=None):
from iaero import iaero
from iadil import iadil
from iasecross import iasecross
if b is None:
b = iasecross()
y = iaero( iadil(f,b),b)
return y
| en | 0.583734 | # -*- encoding: utf-8 -*- # Module iaclose | 2.731816 | 3 |
introduction-to-python/functions-and-packages/script_05.py | nhutnamhcmus/datacamp-playground | 1 | 6612338 | # Create list areas
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Use append twice to add poolhouse and garage size
areas.append(24.5)
areas.append(15.45)
# Print out areas
print(areas)
# Reverse the orders of the elements in areas
areas = areas.reverse()
# Print out areas
print(areas) | # Create list areas
areas = [11.25, 18.0, 20.0, 10.75, 9.50]
# Use append twice to add poolhouse and garage size
areas.append(24.5)
areas.append(15.45)
# Print out areas
print(areas)
# Reverse the orders of the elements in areas
areas = areas.reverse()
# Print out areas
print(areas) | en | 0.798775 | # Create list areas # Use append twice to add poolhouse and garage size # Print out areas # Reverse the orders of the elements in areas # Print out areas | 3.911014 | 4 |
MachineLearning/titanic/dataProcessing.py | HeRaNO/ChickenRibs | 14 | 6612339 | <reponame>HeRaNO/ChickenRibs<filename>MachineLearning/titanic/dataProcessing.py
'''
dataProcessing.py:用于处理数据。
Author:HeRaNO
'''
import re
import sys
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
def makeTitleDict():
Title_Dict = {}
Title_Dict.update(dict.fromkeys(['Capt', 'Col', 'Major'], 1))
Title_Dict.update(dict.fromkeys(['Jonkheer', 'Sir', 'Countess', 'Lady', 'Don', 'Dona'], 2))
Title_Dict.update(dict.fromkeys(['Dr', 'Rev'], 3))
Title_Dict.update(dict.fromkeys(['Mme', 'Ms', 'Mrs'], 4))
Title_Dict.update(dict.fromkeys(['Mlle', 'Miss'], 5))
Title_Dict.update(dict.fromkeys(['Mr'], 6))
Title_Dict.update(dict.fromkeys(['Master'], 7))
return Title_Dict
def transFamSizToLable(x):
if x >= 2 and x <= 4:
return 0
if x > 7:
return 2
return 1
def transAgeToLable(x):
return (int(x) - 1) // 5 + 1
train = pd.read_csv('data\\train.csv')
test = pd.read_csv('data\\test.csv')
allDf = pd.concat([train, test], ignore_index = True)
Title_Dict = makeTitleDict()
allDf.drop(['PassengerId', 'Ticket', 'Fare', 'Cabin'], axis = 1, inplace = True)
allDf['Title'] = allDf.Name.str.extract(r' ([A-Za-z]+)\.', expand=False)
allDf['Title'] = allDf['Title'].map(Title_Dict)
allDf.drop(['Name'], axis = 1, inplace = True)
allDf['Embarked'] = allDf['Embarked'].fillna('S')
allDf['FamilySize'] = allDf['SibSp'] + allDf['Parch'] + 1
allDf['FamilyLable'] = allDf['FamilySize'].apply(transFamSizToLable)
allDf['Sex'] = allDf['Sex'].map({'male': 0, 'female': 1})
allDf['Embarked'] = allDf['Embarked'].map({'C': 0, 'S': 1, 'Q': 2})
def fillMissingAge(df):
process_df = df[['Age', 'Pclass', 'Title', 'Sex', 'SibSp', 'Parch', 'FamilySize', 'FamilyLable']]
known = process_df[process_df.Age.notnull()].iloc[:, :].values
unknown = process_df[process_df.Age.isnull()].iloc[:, :].values
X = known[:, 1:]
y = known[:, 0]
rfr = RandomForestRegressor(random_state = 0, n_estimators = 200, max_depth = 6, n_jobs = -1)
rfr.fit(X,y)
predicted = rfr.predict(unknown[:, 1:]).round(0)
df.loc[df.Age.isnull(), 'Age'] = predicted
return df
allDf = fillMissingAge(allDf)
allDf['AgeLable'] = allDf['Age'].apply(transAgeToLable)
allDf.to_csv("allData.csv")
print("Data processing ended successfully.", file = sys.stderr)
| '''
dataProcessing.py:用于处理数据。
Author:HeRaNO
'''
import re
import sys
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
def makeTitleDict():
Title_Dict = {}
Title_Dict.update(dict.fromkeys(['Capt', 'Col', 'Major'], 1))
Title_Dict.update(dict.fromkeys(['Jonkheer', 'Sir', 'Countess', 'Lady', 'Don', 'Dona'], 2))
Title_Dict.update(dict.fromkeys(['Dr', 'Rev'], 3))
Title_Dict.update(dict.fromkeys(['Mme', 'Ms', 'Mrs'], 4))
Title_Dict.update(dict.fromkeys(['Mlle', 'Miss'], 5))
Title_Dict.update(dict.fromkeys(['Mr'], 6))
Title_Dict.update(dict.fromkeys(['Master'], 7))
return Title_Dict
def transFamSizToLable(x):
if x >= 2 and x <= 4:
return 0
if x > 7:
return 2
return 1
def transAgeToLable(x):
return (int(x) - 1) // 5 + 1
train = pd.read_csv('data\\train.csv')
test = pd.read_csv('data\\test.csv')
allDf = pd.concat([train, test], ignore_index = True)
Title_Dict = makeTitleDict()
allDf.drop(['PassengerId', 'Ticket', 'Fare', 'Cabin'], axis = 1, inplace = True)
allDf['Title'] = allDf.Name.str.extract(r' ([A-Za-z]+)\.', expand=False)
allDf['Title'] = allDf['Title'].map(Title_Dict)
allDf.drop(['Name'], axis = 1, inplace = True)
allDf['Embarked'] = allDf['Embarked'].fillna('S')
allDf['FamilySize'] = allDf['SibSp'] + allDf['Parch'] + 1
allDf['FamilyLable'] = allDf['FamilySize'].apply(transFamSizToLable)
allDf['Sex'] = allDf['Sex'].map({'male': 0, 'female': 1})
allDf['Embarked'] = allDf['Embarked'].map({'C': 0, 'S': 1, 'Q': 2})
def fillMissingAge(df):
process_df = df[['Age', 'Pclass', 'Title', 'Sex', 'SibSp', 'Parch', 'FamilySize', 'FamilyLable']]
known = process_df[process_df.Age.notnull()].iloc[:, :].values
unknown = process_df[process_df.Age.isnull()].iloc[:, :].values
X = known[:, 1:]
y = known[:, 0]
rfr = RandomForestRegressor(random_state = 0, n_estimators = 200, max_depth = 6, n_jobs = -1)
rfr.fit(X,y)
predicted = rfr.predict(unknown[:, 1:]).round(0)
df.loc[df.Age.isnull(), 'Age'] = predicted
return df
allDf = fillMissingAge(allDf)
allDf['AgeLable'] = allDf['Age'].apply(transAgeToLable)
allDf.to_csv("allData.csv")
print("Data processing ended successfully.", file = sys.stderr) | zh | 0.697422 | dataProcessing.py:用于处理数据。 Author:HeRaNO | 3.096331 | 3 |
contrib/python/visioncpp/util.py | GeorgeWeb/visioncpp | 0 | 6612340 | """
Utility functions for Python VisionCpp interface.
"""
import struct
import imghdr
def foreach(func, iterable):
"""
Call function for each element of an iterable. Basically, it's a list
comprehension without a return value.
Arguments:
func (function): Function to call.
iterable (iterable): Sequence of items.
"""
for item in iterable:
func(item)
def call_if_attribute(obj, attr, *args, **kwargs):
"""
Call object method, if it exists.
Arguments:
obj (object): The object.
attr (str): The name of the object method.
*args (optional): Arguments for method.
**kwargs (optional): Keyword arguments for method.
Returns:
Return value of calling method, or None if object does not have method.
"""
op = getattr(obj, attr, None)
if callable(op):
return op(*args, **kwargs)
def get_attribute(obj, attr):
"""
Return object attribute value, if it exists.
Arguments:
obj (object): The object.
attr (str): The name of the object attribute.
"""
at = getattr(obj, attr, None)
if at:
return at
else:
return None
def get_image_size(fname):
"""
Return the size of an image, in pixels.
Arguments:
fname (str): Path to image, either png, gif, or jpeg type.
Returns:
(int, int): The dimensions of the image, width x height.
"""
import visioncpp as vp # needed for VisionCppException type
with open(fname, 'rb') as fhandle:
head = fhandle.read(24)
if len(head) != 24:
return
if imghdr.what(fname) == 'png':
check = struct.unpack('>i', head[4:8])[0]
if check != 0x0d0a1a0a:
return
width, height = struct.unpack('>ii', head[16:24])
elif imghdr.what(fname) == 'gif':
width, height = struct.unpack('<HH', head[6:10])
elif imghdr.what(fname) == 'jpeg':
try:
fhandle.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xc0 <= ftype <= 0xcf:
fhandle.seek(size, 1)
byte = fhandle.read(1)
while ord(byte) == 0xff:
byte = fhandle.read(1)
ftype = ord(byte)
size = struct.unpack('>H', fhandle.read(2))[0] - 2
# We are at a SOFn block
fhandle.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack('>HH', fhandle.read(4))
except Exception: #IGNORE:W0703
raise vp.VisionCppException('failed to read image')
else:
raise vp.VisionCppException('unsupported image type')
return width, height
| """
Utility functions for Python VisionCpp interface.
"""
import struct
import imghdr
def foreach(func, iterable):
"""
Call function for each element of an iterable. Basically, it's a list
comprehension without a return value.
Arguments:
func (function): Function to call.
iterable (iterable): Sequence of items.
"""
for item in iterable:
func(item)
def call_if_attribute(obj, attr, *args, **kwargs):
"""
Call object method, if it exists.
Arguments:
obj (object): The object.
attr (str): The name of the object method.
*args (optional): Arguments for method.
**kwargs (optional): Keyword arguments for method.
Returns:
Return value of calling method, or None if object does not have method.
"""
op = getattr(obj, attr, None)
if callable(op):
return op(*args, **kwargs)
def get_attribute(obj, attr):
"""
Return object attribute value, if it exists.
Arguments:
obj (object): The object.
attr (str): The name of the object attribute.
"""
at = getattr(obj, attr, None)
if at:
return at
else:
return None
def get_image_size(fname):
"""
Return the size of an image, in pixels.
Arguments:
fname (str): Path to image, either png, gif, or jpeg type.
Returns:
(int, int): The dimensions of the image, width x height.
"""
import visioncpp as vp # needed for VisionCppException type
with open(fname, 'rb') as fhandle:
head = fhandle.read(24)
if len(head) != 24:
return
if imghdr.what(fname) == 'png':
check = struct.unpack('>i', head[4:8])[0]
if check != 0x0d0a1a0a:
return
width, height = struct.unpack('>ii', head[16:24])
elif imghdr.what(fname) == 'gif':
width, height = struct.unpack('<HH', head[6:10])
elif imghdr.what(fname) == 'jpeg':
try:
fhandle.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xc0 <= ftype <= 0xcf:
fhandle.seek(size, 1)
byte = fhandle.read(1)
while ord(byte) == 0xff:
byte = fhandle.read(1)
ftype = ord(byte)
size = struct.unpack('>H', fhandle.read(2))[0] - 2
# We are at a SOFn block
fhandle.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack('>HH', fhandle.read(4))
except Exception: #IGNORE:W0703
raise vp.VisionCppException('failed to read image')
else:
raise vp.VisionCppException('unsupported image type')
return width, height
| en | 0.527456 | Utility functions for Python VisionCpp interface. Call function for each element of an iterable. Basically, it's a list comprehension without a return value. Arguments: func (function): Function to call. iterable (iterable): Sequence of items. Call object method, if it exists. Arguments: obj (object): The object. attr (str): The name of the object method. *args (optional): Arguments for method. **kwargs (optional): Keyword arguments for method. Returns: Return value of calling method, or None if object does not have method. Return object attribute value, if it exists. Arguments: obj (object): The object. attr (str): The name of the object attribute. Return the size of an image, in pixels. Arguments: fname (str): Path to image, either png, gif, or jpeg type. Returns: (int, int): The dimensions of the image, width x height. # needed for VisionCppException type # Read 0xff next # We are at a SOFn block # Skip `precision' byte. #IGNORE:W0703 | 3.474252 | 3 |
python/openfile.py | A-Little-Tiny-Cloud/SSH-GUI | 0 | 6612341 | '''
打开文件
根据文件类型不同,打开文件的操作也不同.
例如:
- 文本文件,表示打开文件,并编辑(完成后还可以上传)
- 图像文件,显示图像.
- 动态库: 显示依赖关系
'''
import os
import wx
from common import get_safe_tempfile, get_file_md5, MessageBox
from FileType import FileItem, get_global_ft_list
from os_win import os_ext, Excute_and_Wait
def open_file(fi: FileItem, work_path, ssh):
'打开一个文件,根据文件类型不同,实际操作也不同'
# a. 先判断是否可以打开,以及根据文件大小等决定是否提示.
# a. 若文件太大,提示用户先下载.
# b. 若文件较小,判断是否已知的文件类型.
# - 若是已知类型,调用对应的关联程序打开.
# - 若不是已知类型, 需弹出对话框选择打开方式.
# c. 监控文件改变,当文件关闭后,弹出消息框提示用户,上传覆盖.
max_size = 600 * 1024 # 以100K为界限
big = False
# step1, 判断文件情况
if fi.size > max_size:
msg = "当前文件尺寸较大,无法直接编辑。\n\n 可下载后自行编辑\n 需要现在下载吗?"
if MessageBox(None, "提示", msg, wx.ICON_INFORMATION | wx.YES_NO) == wx.ID_NO:
return
big = True
# step2, 下载文件
tmp_path = get_safe_tempfile(work_path)
os.makedirs(tmp_path)
locfile = os.path.join(tmp_path, fi.name)
ssh.get_file(fi.full_path(), locfile)
if big:
return
sz = os.path.getsize(locfile)
md5 = get_file_md5(locfile)
# step3, 根据文件类型,打开已知类型, 或调用操作系统打开
if fi.mime == 'text/plain': # 简单文本
# 对简单文本文件,由于需要做回城符转换('\n' <--> '\r\n'),
# 应该使用自己的代码编辑文件.
# 目前暂时使用系统默认打开方式(记事本程序)
os_ext.shell_open_and_wait(locfile)
elif fi.mime.startswith('image/'): # 图像
# 对图像来说,如果扩展名不正确,直接调用 shell_open_and_wait 无法打开.
# 因为该函数内部根据扩展名来判断文件类型, 扩展名不对则不能判断文件类型.
# 为了避免此情况, 先根据mime得到文件类型, 然后调用关联程序打开.
filetype = get_global_ft_list().ft_list[fi.icon]
open_cmd = filetype.open_cmd
if open_cmd:
cmd = ''
if open_cmd.find('%1') >= 0:
cmd = open_cmd.replace('%1', locfile)
elif open_cmd.find('%L') >= 0:
cmd = open_cmd.replace('%L', locfile)
if cmd:
cmd = cmd.replace('%*', '')
Excute_and_Wait(cmd)
else:
MessageBox(None, "错误", "没有找到关联程序,无法打开文件!")
return
else:
MessageBox(None, "错误", "没有找到关联程序,无法打开文件!")
return
else:
os_ext.shell_open_and_wait(locfile)
# step4, 检测文件是否修改,弹框确认后,上传文件.
sz_ = os.path.getsize(locfile)
md5_ = get_file_md5(locfile)
if sz != sz_ or md5 != md5_:
msg = "检测到您修改了文件:\n\n{}\n是否要将新版本上传到服务器(覆盖老版本)?".format(fi.name)
if MessageBox(None, "提示", msg, wx.YES_NO) == wx.ID_YES:
ssh.put_file(locfile, fi.full_path())
if __name__ == "__main__":
pass
| '''
打开文件
根据文件类型不同,打开文件的操作也不同.
例如:
- 文本文件,表示打开文件,并编辑(完成后还可以上传)
- 图像文件,显示图像.
- 动态库: 显示依赖关系
'''
import os
import wx
from common import get_safe_tempfile, get_file_md5, MessageBox
from FileType import FileItem, get_global_ft_list
from os_win import os_ext, Excute_and_Wait
def open_file(fi: FileItem, work_path, ssh):
'打开一个文件,根据文件类型不同,实际操作也不同'
# a. 先判断是否可以打开,以及根据文件大小等决定是否提示.
# a. 若文件太大,提示用户先下载.
# b. 若文件较小,判断是否已知的文件类型.
# - 若是已知类型,调用对应的关联程序打开.
# - 若不是已知类型, 需弹出对话框选择打开方式.
# c. 监控文件改变,当文件关闭后,弹出消息框提示用户,上传覆盖.
max_size = 600 * 1024 # 以100K为界限
big = False
# step1, 判断文件情况
if fi.size > max_size:
msg = "当前文件尺寸较大,无法直接编辑。\n\n 可下载后自行编辑\n 需要现在下载吗?"
if MessageBox(None, "提示", msg, wx.ICON_INFORMATION | wx.YES_NO) == wx.ID_NO:
return
big = True
# step2, 下载文件
tmp_path = get_safe_tempfile(work_path)
os.makedirs(tmp_path)
locfile = os.path.join(tmp_path, fi.name)
ssh.get_file(fi.full_path(), locfile)
if big:
return
sz = os.path.getsize(locfile)
md5 = get_file_md5(locfile)
# step3, 根据文件类型,打开已知类型, 或调用操作系统打开
if fi.mime == 'text/plain': # 简单文本
# 对简单文本文件,由于需要做回城符转换('\n' <--> '\r\n'),
# 应该使用自己的代码编辑文件.
# 目前暂时使用系统默认打开方式(记事本程序)
os_ext.shell_open_and_wait(locfile)
elif fi.mime.startswith('image/'): # 图像
# 对图像来说,如果扩展名不正确,直接调用 shell_open_and_wait 无法打开.
# 因为该函数内部根据扩展名来判断文件类型, 扩展名不对则不能判断文件类型.
# 为了避免此情况, 先根据mime得到文件类型, 然后调用关联程序打开.
filetype = get_global_ft_list().ft_list[fi.icon]
open_cmd = filetype.open_cmd
if open_cmd:
cmd = ''
if open_cmd.find('%1') >= 0:
cmd = open_cmd.replace('%1', locfile)
elif open_cmd.find('%L') >= 0:
cmd = open_cmd.replace('%L', locfile)
if cmd:
cmd = cmd.replace('%*', '')
Excute_and_Wait(cmd)
else:
MessageBox(None, "错误", "没有找到关联程序,无法打开文件!")
return
else:
MessageBox(None, "错误", "没有找到关联程序,无法打开文件!")
return
else:
os_ext.shell_open_and_wait(locfile)
# step4, 检测文件是否修改,弹框确认后,上传文件.
sz_ = os.path.getsize(locfile)
md5_ = get_file_md5(locfile)
if sz != sz_ or md5 != md5_:
msg = "检测到您修改了文件:\n\n{}\n是否要将新版本上传到服务器(覆盖老版本)?".format(fi.name)
if MessageBox(None, "提示", msg, wx.YES_NO) == wx.ID_YES:
ssh.put_file(locfile, fi.full_path())
if __name__ == "__main__":
pass
| zh | 0.980377 | 打开文件 根据文件类型不同,打开文件的操作也不同. 例如: - 文本文件,表示打开文件,并编辑(完成后还可以上传) - 图像文件,显示图像. - 动态库: 显示依赖关系 # a. 先判断是否可以打开,以及根据文件大小等决定是否提示. # a. 若文件太大,提示用户先下载. # b. 若文件较小,判断是否已知的文件类型. # - 若是已知类型,调用对应的关联程序打开. # - 若不是已知类型, 需弹出对话框选择打开方式. # c. 监控文件改变,当文件关闭后,弹出消息框提示用户,上传覆盖. # 以100K为界限 # step1, 判断文件情况 # step2, 下载文件 # step3, 根据文件类型,打开已知类型, 或调用操作系统打开 # 简单文本 # 对简单文本文件,由于需要做回城符转换('\n' <--> '\r\n'), # 应该使用自己的代码编辑文件. # 目前暂时使用系统默认打开方式(记事本程序) # 图像 # 对图像来说,如果扩展名不正确,直接调用 shell_open_and_wait 无法打开. # 因为该函数内部根据扩展名来判断文件类型, 扩展名不对则不能判断文件类型. # 为了避免此情况, 先根据mime得到文件类型, 然后调用关联程序打开. # step4, 检测文件是否修改,弹框确认后,上传文件. | 2.248055 | 2 |
modules/thiccbeef.py | Iangecko/arbys | 0 | 6612342 | <filename>modules/thiccbeef.py
from client import client
import discord
import random
images = ["https://cdn.discordapp.com/attachments/364488710995050496/398597078873407499/vRutZA3.jpg",
"https://vignette.wikia.nocookie.net/uncyclopedia/images/0/0d/Fat_cow.jpg",
"https://i.ytimg.com/vi/J3X-ufvxmdo/hqdefault.jpg",
"http://www.healthwantcare.com/wp-content/uploads/2014/07/cow-600x337.jpg",
"https://cdn.discordapp.com/attachments/504486947146694668/540577741586694164/image0.png",
]
@client.command(trigger="thiccbeef")
async def command(command: str, message: discord.Message):
e = discord.Embed(title=discord.Embed.Empty, description=discord.Embed.Empty, colour=discord.Embed.Empty)
e = e.set_image(url=random.choice(images))
await message.channel.send(embed=e)
return
| <filename>modules/thiccbeef.py
from client import client
import discord
import random
images = ["https://cdn.discordapp.com/attachments/364488710995050496/398597078873407499/vRutZA3.jpg",
"https://vignette.wikia.nocookie.net/uncyclopedia/images/0/0d/Fat_cow.jpg",
"https://i.ytimg.com/vi/J3X-ufvxmdo/hqdefault.jpg",
"http://www.healthwantcare.com/wp-content/uploads/2014/07/cow-600x337.jpg",
"https://cdn.discordapp.com/attachments/504486947146694668/540577741586694164/image0.png",
]
@client.command(trigger="thiccbeef")
async def command(command: str, message: discord.Message):
e = discord.Embed(title=discord.Embed.Empty, description=discord.Embed.Empty, colour=discord.Embed.Empty)
e = e.set_image(url=random.choice(images))
await message.channel.send(embed=e)
return
| none | 1 | 2.819644 | 3 | |
resumecollection/resume/urls.py | m-ali-ubit/resume-collection | 0 | 6612343 | from django.urls import path, include
app_name = "resume"
urlpatterns = [
path("", include("resumecollection.resume.v1.urls"))
]
| from django.urls import path, include
app_name = "resume"
urlpatterns = [
path("", include("resumecollection.resume.v1.urls"))
]
| none | 1 | 1.525209 | 2 | |
clarke.py | aywagner/TDA-smear | 5 | 6612344 | import numpy as np
import matplotlib.pyplot as plt
import topnet
import imageio
if __name__ == '__main__':
# Grab and normalize blobs image.
f = -1 * np.asarray(imageio.imread('Data/blobs.png')[:256, :256, 0], dtype=np.float32)
f -= f.min()
f /= f.max()
f *= 255
# Parameters
hom_dim = 0
card = 10000
kernel_size = 4
pool_mode = 'simplex'
eps = 50.0
num_perturb = 100
pers_cutoff = 50
# Generate noisy, downsampled gradients
f_down = topnet.spool(f, kernel_size, pool_mode)[0]
grads = np.zeros((num_perturb, np.prod(f_down.shape)))
for i in range(num_perturb):
noisy_f = f + np.random.uniform(-eps, eps, f.shape)
f_down = topnet.spool(noisy_f, kernel_size, pool_mode)[0]
dgm, cof = topnet.compute_dgm(f_down, card, hom_dim)
big_pers = (dgm[:, 1] - dgm[:, 0]) > pers_cutoff
dgm, cof = dgm[big_pers, :], cof[big_pers, :]
grad_dgm = np.zeros(dgm.shape)
grad_dgm[:, 0], grad_dgm[:, 1] = 2 * (dgm[:, 0] - dgm[:, 1]), 2 * (dgm[:, 1] - dgm[:, 0])
bsm, dsm = topnet.compute_dgm_grad(grad_dgm, cof, f_down)
grads[i, :] = dsm.ravel()
# Visualize inner products of gradients
G = np.dot(grads, grads.T)
plt.figure(figsize=(16, 8))
plt.subplot(121)
plt.imshow(G)
plt.axis('off')
plt.colorbar(fraction=0.046, pad=0.04)
s = np.diag(G)
cs = (1 / s) / np.sum(1 / s)
plt.subplot(122)
plt.plot(cs)
plt.ylim(0, .04)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
| import numpy as np
import matplotlib.pyplot as plt
import topnet
import imageio
if __name__ == '__main__':
# Grab and normalize blobs image.
f = -1 * np.asarray(imageio.imread('Data/blobs.png')[:256, :256, 0], dtype=np.float32)
f -= f.min()
f /= f.max()
f *= 255
# Parameters
hom_dim = 0
card = 10000
kernel_size = 4
pool_mode = 'simplex'
eps = 50.0
num_perturb = 100
pers_cutoff = 50
# Generate noisy, downsampled gradients
f_down = topnet.spool(f, kernel_size, pool_mode)[0]
grads = np.zeros((num_perturb, np.prod(f_down.shape)))
for i in range(num_perturb):
noisy_f = f + np.random.uniform(-eps, eps, f.shape)
f_down = topnet.spool(noisy_f, kernel_size, pool_mode)[0]
dgm, cof = topnet.compute_dgm(f_down, card, hom_dim)
big_pers = (dgm[:, 1] - dgm[:, 0]) > pers_cutoff
dgm, cof = dgm[big_pers, :], cof[big_pers, :]
grad_dgm = np.zeros(dgm.shape)
grad_dgm[:, 0], grad_dgm[:, 1] = 2 * (dgm[:, 0] - dgm[:, 1]), 2 * (dgm[:, 1] - dgm[:, 0])
bsm, dsm = topnet.compute_dgm_grad(grad_dgm, cof, f_down)
grads[i, :] = dsm.ravel()
# Visualize inner products of gradients
G = np.dot(grads, grads.T)
plt.figure(figsize=(16, 8))
plt.subplot(121)
plt.imshow(G)
plt.axis('off')
plt.colorbar(fraction=0.046, pad=0.04)
s = np.diag(G)
cs = (1 / s) / np.sum(1 / s)
plt.subplot(122)
plt.plot(cs)
plt.ylim(0, .04)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.show()
| en | 0.531922 | # Grab and normalize blobs image. # Parameters # Generate noisy, downsampled gradients # Visualize inner products of gradients | 2.366477 | 2 |
avoto.py | lightop/touch2click | 0 | 6612345 | <filename>avoto.py
import argparse
import math
import pyautogui
from pythonosc import dispatcher
from pythonosc import osc_server
import asyncio
import json
pyautogui.PAUSE = 0.01
pyautogui.FAILSAFE = True
PREFIX = "avo"
faderData = [
(1,127,702,127,620),
(2,206,702,206,620),
(3,285,702,285,620),
(4,364,702,364,620),
(5,443,702,443,620),
(6,522,702,522,620),
(7,601,702,601,620),
(8,680,702,680,620),
(9,759,702,759,620),
(10,838,702,838,620),
]
encoderData = [
(1,1070,580,10,0),
(2,1190,580,10,0),
(3,1310,580,10,0),
]
buttonData = [
('ws1',50,68),
('ws2',50,101),
('ws3',50,134),
('ws4',50,167),
('ws5',50,200),
('ws6',50,233),
('ws7',50,266),
('ws8',50,299),
('ws9',50,332),
('ws10',50,365),
('pageplus', 45, 506),
('pageminus',45, 678),
('flash1',127,586),
('flash2',206,586),
('flash3',285,586),
('flash4',364,586),
('flash5',443,586),
('flash6',522,586),
('flash7',601,586),
('flash8',680,586),
('flash9',759,586),
('flash10',838,586),
('swop1',127,545),
('swop2',206,545),
('swop3',285,545),
('swop4',364,545),
('swop5',443,545),
('swop6',522,545),
('swop7',601,545),
('swop8',680,545),
('swop9',759,545),
('swop10',838,545),
('intensity',1029,538),
('position',1071,538),
('color',1113,538),
('gobo',1155,538),
('beam',1197,538),
('effect',1239,538),
('special',1281,538),
('fx',1323,538),
('encoder1up',1074,646),
('encoder2up',1188,646),
('encoder3up',1304,646),
('encoder1down',1074,698),
('encoder2down',1188,698),
('encoder3down',1304,698),
]
keyData = [
('A', ['alt','1']),
('B', ['alt','2']),
('C', ['alt','3']),
('D', ['alt','4']),
('E', ['alt','5']),
('F', ['alt','6']),
('G', ['alt', '7']),
('1',['1']),
('2',['2']),
('3',['3']),
('4',['4']),
('5',['5']),
('6',['6']),
('7',['7']),
('8',['8']),
('9',['9']),
('0',['0']),
('enter',['enter']),
('exit',['esc']),
('avo',['alt', 'a']),
('clear',['alt','c']),
('f1', ['f1']),
('f2', ['f2']),
('f3', ['f3']),
('f3s', ['shift','f3']),
('f4', ['f4']),
('f4s', ['shift','f4']),
('f5', ['f5']),
('f5s', ['shift','f5']),
('f6', ['f6']),
('f7', ['f7']),
('f8', ['f8']),
('f9', ['f9']),
('f10', ['f10']),
('f11', ['f11']),
('f12', ['f12']),
('fixture',['alt','shift','f']),
('palette',['alt','shit','p']),
('macro',['alt','shift','m']),
('group',['alt','shift','g']),
('thro',['divide']),
('at',['multiply']),
('not',['subtract']),
('and',['add']),
('undo',['ctrl','z']),
('record',['alt','r']),
('locate', ['alt', 'l']),
('patch', ['alt','p']),
('disk', ['alt', 'shift','d']),
('system', ['alt', 'shift','s']),
('view', ['alt','v']),
('go', ['alt', 'g']),
('delete', ['alt', 'd']),
('copy', ['alt','shift','c']),
('move', ['alt', 'm']),
('unfold', ['alt','u']),
('include', ['alt', 'i']),
('release', ['alt','shift','r']),
('shape', ['alt', 's']),
('mlmenu', ['alt', 't']),
('blind', ['alt','b']),
('off', ['alt','o']),
('fan', ['alt', 'f']),
('options', ['alt','shift','o']),
('latch', ['alt','shift','l']),
('fixprev', ['alt', 'left']),
('fixnext', ['alt', 'right']),
('all', ['alt','up']),
('highlight', ['alt', 'down']),
]
def gen_osc_addr (type, number):
osc_addr = "/" + PREFIX + "/" + type +"/"+str(number)
print (osc_addr)
return (osc_addr)
def store_data (filename):
print (filename)
data = (buttonData, faderData, keyData, encoderData)
#self.filename = filename
with open (filename, 'w') as f:
json.dump(data, f, indent = 2)
class TTCButton ():
def __init__(self,number,x,y):
self.number = number
self.x = x
self.y = y
self.type = "button"
self.osc_addr = gen_osc_addr(self.type, self.number)
dispatcher.map (self.osc_addr, self.handler, x, y )
def handler (self, unused_addr,args, volume):
if volume == 1.0:
pyautogui.mouseDown (args[0], args[1])
if volume == 0.0:
pyautogui.mouseUp ()
class TTCFader ():
def __init__(self, number, x_zero, y_zero, x_full, y_full):
self.number = number
self.x_zero = x_zero
self.y_zero = y_zero
self.x_full = x_full
self.y_full = y_full
self.y_size = self.y_zero - self.y_full
self.x_size = self.x_zero - self.x_full
self.type = "fader"
self.x_level = 0
self.y_level = 0
self.osc_addr = gen_osc_addr (self.type, self.number)
print (self.osc_addr)
dispatcher.map (self.osc_addr, self.handler, self.x_zero, self.y_zero)
dispatcher.map (self.osc_addr+"/z",self.handler_z, self.x_zero, self.y_zero)
def handler (self, unused_addr, args, volume):
self.y_level = volume*self.y_size
self.x_level = volume*self.x_size
pyautogui.moveTo(self.x_zero - self.x_level, self.y_zero-self.y_level)
def handler_z (self, unused_addr,args, volume):
if (volume == 1):
pyautogui.mouseDown(self.x_zero - self.x_level,self.y_zero - self.y_level)
if (volume ==0):
pyautogui.mouseUp()
class TTCEncoder ():
def __init__(self, number,x,y,h,v):
self.number = number
self.x = x
self.y = y
self.h = h
self.v = v
self.type = "encoder"
self.osc_addr = gen_osc_addr(self.type, self.number)
dispatcher.map (self.osc_addr, self.handler,self.x, self.y)
dispatcher.map (self.osc_addr+"/z", self.handler_z, self.x, self.y)
def handler(self,unused_addr,args,volume):
if (volume == 1.0):
try:
pyautogui.moveRel(self.h, self.v)
except (RuntimeError,ValueError): pass
if (volume == 0.0):
try:
pyautogui.moveRel(self.h*-1,self.v*-1)
except (RuntimeError,ValueError): pass
def handler_z(self, unused_addr, args, volume):
if (volume == 1.0):
try:
pyautogui.mouseDown (args[0], args[1])
except (RuntimeError,ValueError): pass
if (volume == 0.0):
try:
pyautogui.mouseUp()
except (RuntfimeError,ValueError): pass
class TTCKey ():
def __init__(self,number,key):
self.number = number
self.key = key
self.type = "button"
self.osc_addr = gen_osc_addr(self.type, self.number)
dispatcher.map (self.osc_addr, self.handler, self.key )
def handler (self, unused_addr,args, volume):
if volume == 1.0 :
pyautogui.hotkey (*args[0], interval = 0.1)
print (*args[0])
# for i in args[0]:
# pyautogui.keyDown (i)
# print (i)
# if volume == 0.0:
# for i in args[0]:
# pyautogui.keyUp (i)
# print (i)
# def b_handler (unused_addr,args,volume):
# if volume == 1.0 :
# pyautogui.click (args[0], args[1])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip",
default="192.168.1.123", help="The ip to listen on")
parser.add_argument("--port",
type=int, default=8000, help="The port to listen on")
parser.add_argument ("--file", help="Filename")
args = parser.parse_args()
dispatcher = dispatcher.Dispatcher()
faders = []
for x in faderData:
f= TTCFader (*x)
faders.append (f)
encoders =[]
for x in encoderData:
e = TTCEncoder (*x)
encoders.append (e)
buttons = []
for x in buttonData:
b = TTCButton (*x)
buttons.append(b)
keys = []
#print (keyData)
for x,y in keyData:
k = TTCKey (x, y)
loop = asyncio.get_event_loop()
server = osc_server.AsyncIOOSCUDPServer((args.ip, args.port), dispatcher, loop)
print ("Serving on {}".format((args.ip, args.port)))
server.serve()
loop.run_forever()
| <filename>avoto.py
import argparse
import math
import pyautogui
from pythonosc import dispatcher
from pythonosc import osc_server
import asyncio
import json
pyautogui.PAUSE = 0.01
pyautogui.FAILSAFE = True
PREFIX = "avo"
faderData = [
(1,127,702,127,620),
(2,206,702,206,620),
(3,285,702,285,620),
(4,364,702,364,620),
(5,443,702,443,620),
(6,522,702,522,620),
(7,601,702,601,620),
(8,680,702,680,620),
(9,759,702,759,620),
(10,838,702,838,620),
]
encoderData = [
(1,1070,580,10,0),
(2,1190,580,10,0),
(3,1310,580,10,0),
]
buttonData = [
('ws1',50,68),
('ws2',50,101),
('ws3',50,134),
('ws4',50,167),
('ws5',50,200),
('ws6',50,233),
('ws7',50,266),
('ws8',50,299),
('ws9',50,332),
('ws10',50,365),
('pageplus', 45, 506),
('pageminus',45, 678),
('flash1',127,586),
('flash2',206,586),
('flash3',285,586),
('flash4',364,586),
('flash5',443,586),
('flash6',522,586),
('flash7',601,586),
('flash8',680,586),
('flash9',759,586),
('flash10',838,586),
('swop1',127,545),
('swop2',206,545),
('swop3',285,545),
('swop4',364,545),
('swop5',443,545),
('swop6',522,545),
('swop7',601,545),
('swop8',680,545),
('swop9',759,545),
('swop10',838,545),
('intensity',1029,538),
('position',1071,538),
('color',1113,538),
('gobo',1155,538),
('beam',1197,538),
('effect',1239,538),
('special',1281,538),
('fx',1323,538),
('encoder1up',1074,646),
('encoder2up',1188,646),
('encoder3up',1304,646),
('encoder1down',1074,698),
('encoder2down',1188,698),
('encoder3down',1304,698),
]
keyData = [
('A', ['alt','1']),
('B', ['alt','2']),
('C', ['alt','3']),
('D', ['alt','4']),
('E', ['alt','5']),
('F', ['alt','6']),
('G', ['alt', '7']),
('1',['1']),
('2',['2']),
('3',['3']),
('4',['4']),
('5',['5']),
('6',['6']),
('7',['7']),
('8',['8']),
('9',['9']),
('0',['0']),
('enter',['enter']),
('exit',['esc']),
('avo',['alt', 'a']),
('clear',['alt','c']),
('f1', ['f1']),
('f2', ['f2']),
('f3', ['f3']),
('f3s', ['shift','f3']),
('f4', ['f4']),
('f4s', ['shift','f4']),
('f5', ['f5']),
('f5s', ['shift','f5']),
('f6', ['f6']),
('f7', ['f7']),
('f8', ['f8']),
('f9', ['f9']),
('f10', ['f10']),
('f11', ['f11']),
('f12', ['f12']),
('fixture',['alt','shift','f']),
('palette',['alt','shit','p']),
('macro',['alt','shift','m']),
('group',['alt','shift','g']),
('thro',['divide']),
('at',['multiply']),
('not',['subtract']),
('and',['add']),
('undo',['ctrl','z']),
('record',['alt','r']),
('locate', ['alt', 'l']),
('patch', ['alt','p']),
('disk', ['alt', 'shift','d']),
('system', ['alt', 'shift','s']),
('view', ['alt','v']),
('go', ['alt', 'g']),
('delete', ['alt', 'd']),
('copy', ['alt','shift','c']),
('move', ['alt', 'm']),
('unfold', ['alt','u']),
('include', ['alt', 'i']),
('release', ['alt','shift','r']),
('shape', ['alt', 's']),
('mlmenu', ['alt', 't']),
('blind', ['alt','b']),
('off', ['alt','o']),
('fan', ['alt', 'f']),
('options', ['alt','shift','o']),
('latch', ['alt','shift','l']),
('fixprev', ['alt', 'left']),
('fixnext', ['alt', 'right']),
('all', ['alt','up']),
('highlight', ['alt', 'down']),
]
def gen_osc_addr (type, number):
osc_addr = "/" + PREFIX + "/" + type +"/"+str(number)
print (osc_addr)
return (osc_addr)
def store_data (filename):
print (filename)
data = (buttonData, faderData, keyData, encoderData)
#self.filename = filename
with open (filename, 'w') as f:
json.dump(data, f, indent = 2)
class TTCButton ():
def __init__(self,number,x,y):
self.number = number
self.x = x
self.y = y
self.type = "button"
self.osc_addr = gen_osc_addr(self.type, self.number)
dispatcher.map (self.osc_addr, self.handler, x, y )
def handler (self, unused_addr,args, volume):
if volume == 1.0:
pyautogui.mouseDown (args[0], args[1])
if volume == 0.0:
pyautogui.mouseUp ()
class TTCFader ():
def __init__(self, number, x_zero, y_zero, x_full, y_full):
self.number = number
self.x_zero = x_zero
self.y_zero = y_zero
self.x_full = x_full
self.y_full = y_full
self.y_size = self.y_zero - self.y_full
self.x_size = self.x_zero - self.x_full
self.type = "fader"
self.x_level = 0
self.y_level = 0
self.osc_addr = gen_osc_addr (self.type, self.number)
print (self.osc_addr)
dispatcher.map (self.osc_addr, self.handler, self.x_zero, self.y_zero)
dispatcher.map (self.osc_addr+"/z",self.handler_z, self.x_zero, self.y_zero)
def handler (self, unused_addr, args, volume):
self.y_level = volume*self.y_size
self.x_level = volume*self.x_size
pyautogui.moveTo(self.x_zero - self.x_level, self.y_zero-self.y_level)
def handler_z (self, unused_addr,args, volume):
if (volume == 1):
pyautogui.mouseDown(self.x_zero - self.x_level,self.y_zero - self.y_level)
if (volume ==0):
pyautogui.mouseUp()
class TTCEncoder ():
def __init__(self, number,x,y,h,v):
self.number = number
self.x = x
self.y = y
self.h = h
self.v = v
self.type = "encoder"
self.osc_addr = gen_osc_addr(self.type, self.number)
dispatcher.map (self.osc_addr, self.handler,self.x, self.y)
dispatcher.map (self.osc_addr+"/z", self.handler_z, self.x, self.y)
def handler(self,unused_addr,args,volume):
if (volume == 1.0):
try:
pyautogui.moveRel(self.h, self.v)
except (RuntimeError,ValueError): pass
if (volume == 0.0):
try:
pyautogui.moveRel(self.h*-1,self.v*-1)
except (RuntimeError,ValueError): pass
def handler_z(self, unused_addr, args, volume):
if (volume == 1.0):
try:
pyautogui.mouseDown (args[0], args[1])
except (RuntimeError,ValueError): pass
if (volume == 0.0):
try:
pyautogui.mouseUp()
except (RuntfimeError,ValueError): pass
class TTCKey ():
def __init__(self,number,key):
self.number = number
self.key = key
self.type = "button"
self.osc_addr = gen_osc_addr(self.type, self.number)
dispatcher.map (self.osc_addr, self.handler, self.key )
def handler (self, unused_addr,args, volume):
if volume == 1.0 :
pyautogui.hotkey (*args[0], interval = 0.1)
print (*args[0])
# for i in args[0]:
# pyautogui.keyDown (i)
# print (i)
# if volume == 0.0:
# for i in args[0]:
# pyautogui.keyUp (i)
# print (i)
# def b_handler (unused_addr,args,volume):
# if volume == 1.0 :
# pyautogui.click (args[0], args[1])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip",
default="192.168.1.123", help="The ip to listen on")
parser.add_argument("--port",
type=int, default=8000, help="The port to listen on")
parser.add_argument ("--file", help="Filename")
args = parser.parse_args()
dispatcher = dispatcher.Dispatcher()
faders = []
for x in faderData:
f= TTCFader (*x)
faders.append (f)
encoders =[]
for x in encoderData:
e = TTCEncoder (*x)
encoders.append (e)
buttons = []
for x in buttonData:
b = TTCButton (*x)
buttons.append(b)
keys = []
#print (keyData)
for x,y in keyData:
k = TTCKey (x, y)
loop = asyncio.get_event_loop()
server = osc_server.AsyncIOOSCUDPServer((args.ip, args.port), dispatcher, loop)
print ("Serving on {}".format((args.ip, args.port)))
server.serve()
loop.run_forever()
| en | 0.278928 | #self.filename = filename # for i in args[0]: # pyautogui.keyDown (i) # print (i) # if volume == 0.0: # for i in args[0]: # pyautogui.keyUp (i) # print (i) # def b_handler (unused_addr,args,volume): # if volume == 1.0 : # pyautogui.click (args[0], args[1]) #print (keyData) | 2.328925 | 2 |
sympy/physics/optics/tests/test_polarization.py | msgoff/sympy | 0 | 6612346 | from sympy.physics.optics.polarization import (
jones_vector,
stokes_vector,
jones_2_stokes,
linear_polarizer,
phase_retarder,
half_wave_retarder,
quarter_wave_retarder,
transmissive_filter,
reflective_filter,
mueller_matrix,
polarizing_beam_splitter,
)
from sympy import Matrix, pi, symbols, exp, I, S
def test_polarization():
assert jones_vector(0, 0) == Matrix([1, 0])
assert jones_vector(pi / 2, 0) == Matrix([0, 1])
#################################################################
assert stokes_vector(0, 0) == Matrix([1, 1, 0, 0])
assert stokes_vector(pi / 2, 0) == Matrix([1, -1, 0, 0])
#################################################################
H = jones_vector(0, 0)
V = jones_vector(pi / 2, 0)
D = jones_vector(pi / 4, 0)
A = jones_vector(-pi / 4, 0)
R = jones_vector(0, pi / 4)
L = jones_vector(0, -pi / 4)
res = [
Matrix([1, 1, 0, 0]),
Matrix([1, -1, 0, 0]),
Matrix([1, 0, 1, 0]),
Matrix([1, 0, -1, 0]),
Matrix([1, 0, 0, 1]),
Matrix([1, 0, 0, -1]),
]
assert [jones_2_stokes(e) for e in [H, V, D, A, R, L]] == res
#################################################################
assert linear_polarizer(0) == Matrix([[1, 0], [0, 0]])
#################################################################
delta = symbols("delta", real=True)
res = Matrix([[exp(-I * delta / 2), 0], [0, exp(I * delta / 2)]])
assert phase_retarder(0, delta) == res
#################################################################
assert half_wave_retarder(0) == Matrix([[-I, 0], [0, I]])
#################################################################
res = Matrix([[exp(-I * pi / 4), 0], [0, I * exp(-I * pi / 4)]])
assert quarter_wave_retarder(0) == res
#################################################################
assert transmissive_filter(1) == Matrix([[1, 0], [0, 1]])
#################################################################
assert reflective_filter(1) == Matrix([[1, 0], [0, -1]])
res = Matrix(
[
[S(1) / 2, S(1) / 2, 0, 0],
[S(1) / 2, S(1) / 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
]
)
assert mueller_matrix(linear_polarizer(0)) == res
#################################################################
res = Matrix([[1, 0, 0, 0], [0, 0, 0, -I], [0, 0, 1, 0], [0, -I, 0, 0]])
assert polarizing_beam_splitter() == res
| from sympy.physics.optics.polarization import (
jones_vector,
stokes_vector,
jones_2_stokes,
linear_polarizer,
phase_retarder,
half_wave_retarder,
quarter_wave_retarder,
transmissive_filter,
reflective_filter,
mueller_matrix,
polarizing_beam_splitter,
)
from sympy import Matrix, pi, symbols, exp, I, S
def test_polarization():
assert jones_vector(0, 0) == Matrix([1, 0])
assert jones_vector(pi / 2, 0) == Matrix([0, 1])
#################################################################
assert stokes_vector(0, 0) == Matrix([1, 1, 0, 0])
assert stokes_vector(pi / 2, 0) == Matrix([1, -1, 0, 0])
#################################################################
H = jones_vector(0, 0)
V = jones_vector(pi / 2, 0)
D = jones_vector(pi / 4, 0)
A = jones_vector(-pi / 4, 0)
R = jones_vector(0, pi / 4)
L = jones_vector(0, -pi / 4)
res = [
Matrix([1, 1, 0, 0]),
Matrix([1, -1, 0, 0]),
Matrix([1, 0, 1, 0]),
Matrix([1, 0, -1, 0]),
Matrix([1, 0, 0, 1]),
Matrix([1, 0, 0, -1]),
]
assert [jones_2_stokes(e) for e in [H, V, D, A, R, L]] == res
#################################################################
assert linear_polarizer(0) == Matrix([[1, 0], [0, 0]])
#################################################################
delta = symbols("delta", real=True)
res = Matrix([[exp(-I * delta / 2), 0], [0, exp(I * delta / 2)]])
assert phase_retarder(0, delta) == res
#################################################################
assert half_wave_retarder(0) == Matrix([[-I, 0], [0, I]])
#################################################################
res = Matrix([[exp(-I * pi / 4), 0], [0, I * exp(-I * pi / 4)]])
assert quarter_wave_retarder(0) == res
#################################################################
assert transmissive_filter(1) == Matrix([[1, 0], [0, 1]])
#################################################################
assert reflective_filter(1) == Matrix([[1, 0], [0, -1]])
res = Matrix(
[
[S(1) / 2, S(1) / 2, 0, 0],
[S(1) / 2, S(1) / 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
]
)
assert mueller_matrix(linear_polarizer(0)) == res
#################################################################
res = Matrix([[1, 0, 0, 0], [0, 0, 0, -I], [0, 0, 1, 0], [0, -I, 0, 0]])
assert polarizing_beam_splitter() == res
| de | 0.87075 | ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# ################################################################# | 2.7226 | 3 |
places365/similar_words.py | sanjeevg15/syncphonic | 0 | 6612347 | <filename>places365/similar_words.py
import gensim.downloader as api
from gensim.models import KeyedVectors
from run_placesCNN_basic import get_image_tags
#from visualize import display_pca_scatterplot
# if __name__ == "__main__":
def similar(img):
info = api.info()
model = api.load("word2vec-google-news-300")
#model = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300-SLIM.bin.gz', binary=True)
# moods = ['Uplifting',
# 'Epic',
# 'Powerful',
# 'Exciting',
# 'Happy',
# 'Funny',
# 'Carefree',
# 'Hopeful',
# 'Love',
# 'Playful',
# 'Groovy',
# 'Sexy',
# 'Peaceful',
# 'Mysterious',
# 'Serious',
# 'Dramatic',
# 'Angry',
# 'Tense',
# 'Sad', ]
scenes = ['Landscape', 'Nature', 'Sports', 'Food', 'Buildings', 'Art', 'Technology' , 'Roadtrip']
scene_info = []
base_words = get_image_tags(img)
#print("scene")
for i in base_words:
scene_info.append(i[1])
mood_similarities = [0] * len(scenes)
for weight, base_word in base_words:
for i, mood in enumerate(scenes):
try:
base_word = base_word.split('_')[0]
base_word = base_word.split('/')[0]
cur_similarity = model.similarity(base_word, mood.lower())
#print(cur_similarity)
mood_similarities[i] += weight * cur_similarity
print("Similarity between {} and {}".format(base_word, mood.lower()), cur_similarity)
except:
continue
mood_similarities = [x / len(base_words) for x in mood_similarities]
sorted_moods = [y for x, y in sorted(zip(mood_similarities, scenes), key=lambda x: x[0])]
sorted_moods.reverse()
print(list(zip(scenes, mood_similarities)))
print(sorted_moods)
sorted_moods = [x.lower() for x in sorted_moods]
#print(sorted_moods[0])
final_mood = sorted_moods[0]
#print(scene_info)
return scene_info, final_mood
# scene_info = similar()
| <filename>places365/similar_words.py
import gensim.downloader as api
from gensim.models import KeyedVectors
from run_placesCNN_basic import get_image_tags
#from visualize import display_pca_scatterplot
# if __name__ == "__main__":
def similar(img):
    """Rank a fixed set of scene categories by word2vec similarity to an image's tags.

    Tags are obtained from the Places365 CNN via ``get_image_tags``; each tag
    word is compared against every candidate scene label using word2vec
    cosine similarity, weighted by the tag's confidence, and the scenes are
    ranked by their accumulated (averaged) score.

    Parameters
    ----------
    img :
        Image (path or object) accepted by ``get_image_tags``.

    Returns
    -------
    tuple[list[str], str]
        ``(scene_info, final_mood)`` where ``scene_info`` is the list of raw
        tag words returned by the CNN and ``final_mood`` is the lower-cased
        best-matching scene label.
    """
    # Load the (very large) word2vec model once and cache it on the function
    # object so repeated calls do not re-download / re-parse it.
    if not hasattr(similar, "_model"):
        similar._model = api.load("word2vec-google-news-300")
    model = similar._model

    scenes = ['Landscape', 'Nature', 'Sports', 'Food', 'Buildings', 'Art',
              'Technology', 'Roadtrip']

    # get_image_tags yields (weight, tag) pairs -- TODO confirm ordering
    # against run_placesCNN_basic.
    base_words = get_image_tags(img)
    scene_info = [tag for _, tag in base_words]

    scene_scores = [0.0] * len(scenes)
    for weight, base_word in base_words:
        # Places365 tags can look like "forest_path/broadleaf"; keep only the
        # first token, which is the word most likely to be in the vocabulary.
        # (Split once here instead of on every inner-loop iteration.)
        word = base_word.split('_')[0].split('/')[0]
        for i, scene in enumerate(scenes):
            try:
                cur_similarity = model.similarity(word, scene.lower())
            except KeyError:
                # Word not in the word2vec vocabulary -- skip this pair only.
                continue
            scene_scores[i] += weight * cur_similarity
            print("Similarity between {} and {}".format(word, scene.lower()),
                  cur_similarity)

    # Average the accumulated scores over the number of tags; guard against
    # an empty tag list, which previously raised ZeroDivisionError.
    if base_words:
        scene_scores = [score / len(base_words) for score in scene_scores]

    # Scenes ranked best-first by their averaged similarity score.
    ranked = [scene for _, scene in
              sorted(zip(scene_scores, scenes),
                     key=lambda pair: pair[0], reverse=True)]
    print(list(zip(scenes, scene_scores)))
    print(ranked)

    final_mood = ranked[0].lower()
    return scene_info, final_mood
# scene_info = similar()
| en | 0.237816 | #from visualize import display_pca_scatterplot # if __name__ == "__main__": #model = KeyedVectors.load_word2vec_format('GoogleNews-vectors-negative300-SLIM.bin.gz', binary=True) # moods = ['Uplifting', # 'Epic', # 'Powerful', # 'Exciting', # 'Happy', # 'Funny', # 'Carefree', # 'Hopeful', # 'Love', # 'Playful', # 'Groovy', # 'Sexy', # 'Peaceful', # 'Mysterious', # 'Serious', # 'Dramatic', # 'Angry', # 'Tense', # 'Sad', ] #print("scene") #print(cur_similarity) #print(sorted_moods[0]) #print(scene_info) # scene_info = similar() | 2.996777 | 3 |
model/mil_nce_net.py | ishine/audio-retrieval | 237 | 6612348 | <reponame>ishine/audio-retrieval
from typing import Dict, Tuple
import torch
from typeguard import typechecked
from base import BaseModel
class MNNet(BaseModel):
@typechecked
def __init__(
self,
text_dim: int,
expert_dims: Dict[str, Tuple[int, int]],
**_unused,
):
self.text_dim = text_dim
self.expert_dims = expert_dims
self.modalities = list(expert_dims.keys())
super().__init__()
self.dummy_param = torch.nn.Parameter(torch.ones(1) * 1E-5)
@typechecked
def forward(
self,
text: torch.Tensor,
ind: Dict[str, torch.Tensor],
experts: Dict[str, torch.Tensor],
**_unused,
):
self.sanity_checks(text=text, experts=experts, ind=ind)
vid_embedding = next(iter(experts.values()))
vid_embedding = self.dummy_param + vid_embedding
text = text.view(text.shape[0] * text.shape[1], text.shape[-1])
# text = text / torch.norm(text, p=2, dim=1).reshape(-1, 1)
# vid_embedding = vid_embedding / torch.norm(vid_embedding, p=2,
# dim=1).reshape(-1, 1)
sims = torch.matmul(text, vid_embedding.t())
return {
"modalities": self.modalities,
"cross_view_conf_matrix": sims,
"text_embds": {self.modalities[0]: text},
"vid_embds": {self.modalities[0]: vid_embedding},
}
@typechecked
def sanity_checks(
self,
text: torch.Tensor,
ind: Dict[str, torch.Tensor],
experts: Dict[str, torch.Tensor],
):
msg = f"Text dim {text.shape[-1]} did not match expected {self.text_dim}"
assert text.shape[-1] == self.text_dim, msg
assert len(experts) == 1, "Expected single modality experts"
assert len(text.shape) == 4, "Expected four axes for text input"
assert text.shape[2] == 1, "Expected singleton for text input on dim 2"
for expert in self.expert_dims:
msg = f"Expected all features to be present for {expert}"
assert ind[expert].sum() == len(ind[expert]), msg
feats = experts[expert]
expected = self.expert_dims[expert]
msg = f"Feature shape {feats.shape[1]} did not match expected {expected}"
assert feats.shape[1] == expected[-1], msg
| from typing import Dict, Tuple
import torch
from typeguard import typechecked
from base import BaseModel
class MNNet(BaseModel):
@typechecked
def __init__(
self,
text_dim: int,
expert_dims: Dict[str, Tuple[int, int]],
**_unused,
):
self.text_dim = text_dim
self.expert_dims = expert_dims
self.modalities = list(expert_dims.keys())
super().__init__()
self.dummy_param = torch.nn.Parameter(torch.ones(1) * 1E-5)
@typechecked
def forward(
self,
text: torch.Tensor,
ind: Dict[str, torch.Tensor],
experts: Dict[str, torch.Tensor],
**_unused,
):
self.sanity_checks(text=text, experts=experts, ind=ind)
vid_embedding = next(iter(experts.values()))
vid_embedding = self.dummy_param + vid_embedding
text = text.view(text.shape[0] * text.shape[1], text.shape[-1])
# text = text / torch.norm(text, p=2, dim=1).reshape(-1, 1)
# vid_embedding = vid_embedding / torch.norm(vid_embedding, p=2,
# dim=1).reshape(-1, 1)
sims = torch.matmul(text, vid_embedding.t())
return {
"modalities": self.modalities,
"cross_view_conf_matrix": sims,
"text_embds": {self.modalities[0]: text},
"vid_embds": {self.modalities[0]: vid_embedding},
}
@typechecked
def sanity_checks(
self,
text: torch.Tensor,
ind: Dict[str, torch.Tensor],
experts: Dict[str, torch.Tensor],
):
msg = f"Text dim {text.shape[-1]} did not match expected {self.text_dim}"
assert text.shape[-1] == self.text_dim, msg
assert len(experts) == 1, "Expected single modality experts"
assert len(text.shape) == 4, "Expected four axes for text input"
assert text.shape[2] == 1, "Expected singleton for text input on dim 2"
for expert in self.expert_dims:
msg = f"Expected all features to be present for {expert}"
assert ind[expert].sum() == len(ind[expert]), msg
feats = experts[expert]
expected = self.expert_dims[expert]
msg = f"Feature shape {feats.shape[1]} did not match expected {expected}"
assert feats.shape[1] == expected[-1], msg | en | 0.205147 | # text = text / torch.norm(text, p=2, dim=1).reshape(-1, 1) # vid_embedding = vid_embedding / torch.norm(vid_embedding, p=2, # dim=1).reshape(-1, 1) | 2.201579 | 2 |
src/zenodo_client/cli.py | cthoyt/zenodo-client | 3 | 6612349 | # -*- coding: utf-8 -*-
"""Command line interface for :mod:`zenodo_client`.
Why does this file exist, and why not put this in ``__main__``? You might be tempted to import things from ``__main__``
later, but that will cause problems--the code will get executed twice:
- When you run ``python3 -m zenodo_client`` python will execute``__main__.py`` as a script.
That means there won't be any ``zenodo_client.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``zenodo_client.__main__`` in ``sys.modules``.
.. seealso:: https://click.palletsprojects.com/en/7.x/setuptools/#setuptools-integration
"""
import logging
import click
from more_click import verbose_option
from .api import download_zenodo, download_zenodo_latest, update_zenodo
__all__ = ['main']
logger = logging.getLogger(__name__)
@click.group()
def main():
"""CLI for Zenodo Client."""
@main.command()
@click.argument('deposition')
@click.argument('path')
@click.option('--force', is_flag=True)
@click.option('--latest', is_flag=True)
def download(deposition: str, path: str, force: bool, latest: bool):
"""Ensure a record is downloaded."""
if latest:
download_zenodo_latest(deposition, path, force=force)
else:
download_zenodo(deposition, path, force=force)
@main.group()
@click.argument('deposition')
@click.argument('paths', nargs=-1)
@verbose_option
@click.version_option()
def update(deposition, paths):
"""Update the record and given files."""
update_zenodo(deposition, paths)
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*-
"""Command line interface for :mod:`zenodo_client`.
Why does this file exist, and why not put this in ``__main__``? You might be tempted to import things from ``__main__``
later, but that will cause problems--the code will get executed twice:
- When you run ``python3 -m zenodo_client`` python will execute``__main__.py`` as a script.
That means there won't be any ``zenodo_client.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``zenodo_client.__main__`` in ``sys.modules``.
.. seealso:: https://click.palletsprojects.com/en/7.x/setuptools/#setuptools-integration
"""
import logging
import click
from more_click import verbose_option
from .api import download_zenodo, download_zenodo_latest, update_zenodo
__all__ = ['main']
logger = logging.getLogger(__name__)
@click.group()
def main():
"""CLI for Zenodo Client."""
@main.command()
@click.argument('deposition')
@click.argument('path')
@click.option('--force', is_flag=True)
@click.option('--latest', is_flag=True)
def download(deposition: str, path: str, force: bool, latest: bool):
"""Ensure a record is downloaded."""
if latest:
download_zenodo_latest(deposition, path, force=force)
else:
download_zenodo(deposition, path, force=force)
@main.group()
@click.argument('deposition')
@click.argument('paths', nargs=-1)
@verbose_option
@click.version_option()
def update(deposition, paths):
"""Update the record and given files."""
update_zenodo(deposition, paths)
if __name__ == '__main__':
main()
| en | 0.798716 | # -*- coding: utf-8 -*- Command line interface for :mod:`zenodo_client`. Why does this file exist, and why not put this in ``__main__``? You might be tempted to import things from ``__main__`` later, but that will cause problems--the code will get executed twice: - When you run ``python3 -m zenodo_client`` python will execute``__main__.py`` as a script. That means there won't be any ``zenodo_client.__main__`` in ``sys.modules``. - When you import __main__ it will get executed again (as a module) because there's no ``zenodo_client.__main__`` in ``sys.modules``. .. seealso:: https://click.palletsprojects.com/en/7.x/setuptools/#setuptools-integration CLI for Zenodo Client. Ensure a record is downloaded. Update the record and given files. | 2.266475 | 2 |
anime_dl/providers/all_anime_provider.py | ArjixWasTaken/anime-dl | 0 | 6612350 | <reponame>ArjixWasTaken/anime-dl<gh_stars>0
from anime_dl.providers.ProviderAPI import Provider, SearchResult, LoadResponse, TvStatus, ExtractorLink
from anime_dl.utils import session as ses
from bs4 import BeautifulSoup
import typing
import dukpy
def get_status(status):
if status == "Releasing":
return TvStatus.ONGOING
return TvStatus.COMPLETED
class AllAnimeProvider(Provider):
api_name = "AllAnimeProvider"
main_url = "https://allanime.site"
session = ses
def search(self, query: str) -> typing.List[SearchResult]:
payload = f"variables=%7B%22search%22%3A%7B%22allowAdult%22%3Afalse%2C%22query%22%3A%22{query}%22%7D%2C%22limit%22%3A100%2C%22page%22%3A1%2C%22translationType%22%3A%22sub%22%7D&extensions=%7B%22persistedQuery%22%3A%7B%22version%22%3A1%2C%22sha256Hash%22%3A%229343797cc3d9e3f444e2d3b7db9a84d759b816a4d84512ea72d079f85bb96e98%22%7D%7D"
response = self.session.get(f"https://allanime.site/graphql?{payload}")
if "PERSISTED_QUERY_NOT_FOUND" in response.text:
response = self.session.get(
f"https://allanime.site/graphql?{payload}")
if "PERSISTED_QUERY_NOT_FOUND" in response.text:
return []
response = response.json()
results = []
for result in response["data"]["shows"]["edges"]:
skip = 0
episodes = result["availableEpisodes"]
for typ in ("raw", "sub", "dub"):
# To filter out anime that have no episodes.
if typ in episodes:
if episodes[typ] == 0:
skip += 1
if skip == 3:
continue
results.append(result)
return [
SearchResult(
x["name"],
f"{self.main_url}/anime/{x['_id']}",
self.api_name,
x["thumbnail"],
x["season"]["year"]
)
for x in results
]
def load(self, url: str) -> LoadResponse:
html = self.session.get(url).text
soup = BeautifulSoup(html, "html.parser")
for script in soup.select("script"):
if "window.__NUXT__" in str(script):
show_data = dukpy.evaljs([
"const window = {}",
script.text,
"window.__NUXT__.fetch[0].show"
])
return LoadResponse(
show_data["name"],
url,
self.api_name,
[
f"{self.main_url}/anime/{show_data['_id']}/episodes/sub/{x}"
for i in show_data["availableEpisodes"]
for x in range(show_data["availableEpisodes"][i])
],
show_data["thumbnail"],
show_data["description"],
None,
show_data["airedStart"]["year"] if "year" in show_data["airedStart"] else None,
get_status(show_data["status"])
)
raise FileNotFoundError("The given anime was not found.")
def load_links(self, url: str) -> typing.List[ExtractorLink]:
raise NotImplementedError
| from anime_dl.providers.ProviderAPI import Provider, SearchResult, LoadResponse, TvStatus, ExtractorLink
from anime_dl.utils import session as ses
from bs4 import BeautifulSoup
import typing
import dukpy
def get_status(status):
if status == "Releasing":
return TvStatus.ONGOING
return TvStatus.COMPLETED
class AllAnimeProvider(Provider):
api_name = "AllAnimeProvider"
main_url = "https://allanime.site"
session = ses
def search(self, query: str) -> typing.List[SearchResult]:
payload = f"variables=%7B%22search%22%3A%7B%22allowAdult%22%3Afalse%2C%22query%22%3A%22{query}%22%7D%2C%22limit%22%3A100%2C%22page%22%3A1%2C%22translationType%22%3A%22sub%22%7D&extensions=%7B%22persistedQuery%22%3A%7B%22version%22%3A1%2C%22sha256Hash%22%3A%229343797cc3d9e3f444e2d3b7db9a84d759b816a4d84512ea72d079f85bb96e98%22%7D%7D"
response = self.session.get(f"https://allanime.site/graphql?{payload}")
if "PERSISTED_QUERY_NOT_FOUND" in response.text:
response = self.session.get(
f"https://allanime.site/graphql?{payload}")
if "PERSISTED_QUERY_NOT_FOUND" in response.text:
return []
response = response.json()
results = []
for result in response["data"]["shows"]["edges"]:
skip = 0
episodes = result["availableEpisodes"]
for typ in ("raw", "sub", "dub"):
# To filter out anime that have no episodes.
if typ in episodes:
if episodes[typ] == 0:
skip += 1
if skip == 3:
continue
results.append(result)
return [
SearchResult(
x["name"],
f"{self.main_url}/anime/{x['_id']}",
self.api_name,
x["thumbnail"],
x["season"]["year"]
)
for x in results
]
def load(self, url: str) -> LoadResponse:
html = self.session.get(url).text
soup = BeautifulSoup(html, "html.parser")
for script in soup.select("script"):
if "window.__NUXT__" in str(script):
show_data = dukpy.evaljs([
"const window = {}",
script.text,
"window.__NUXT__.fetch[0].show"
])
return LoadResponse(
show_data["name"],
url,
self.api_name,
[
f"{self.main_url}/anime/{show_data['_id']}/episodes/sub/{x}"
for i in show_data["availableEpisodes"]
for x in range(show_data["availableEpisodes"][i])
],
show_data["thumbnail"],
show_data["description"],
None,
show_data["airedStart"]["year"] if "year" in show_data["airedStart"] else None,
get_status(show_data["status"])
)
raise FileNotFoundError("The given anime was not found.")
def load_links(self, url: str) -> typing.List[ExtractorLink]:
raise NotImplementedError | en | 0.94483 | # To filter out anime that have no episodes. | 2.526425 | 3 |