max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
tests/functional_tests.py
|
jab/py-skiplist
| 23
|
6625551
|
<reponame>jab/py-skiplist
import unittest
import collections
from py_skiplist.iterators import uniform
from py_skiplist.skiplist import Skiplist
class InterfaceTestCase(unittest.TestCase):
    """Verify that Skiplist exposes the standard mapping interface."""

    def test_interface_methods_set(self):
        # collections.MutableMapping was a deprecated alias removed in
        # Python 3.10; the ABC lives in collections.abc since Python 3.3.
        import collections.abc
        self.assertTrue(
            issubclass(Skiplist, collections.abc.MutableMapping),
            msg='Skiplist should always implement the MutableMapping interface',
        )

    def test_get(self):
        # get() returns the stored value, the supplied default, or None.
        sl = Skiplist(foo='bar')
        self.assertEqual(sl.get('foo'), 'bar')
        self.assertEqual(sl.get('None', 'baz'), 'baz')
        self.assertIsNone(sl.get('Nothing'))

    def test_contains(self):
        sl = Skiplist(one=1)
        self.assertIn('one', sl)
        self.assertNotIn('two', sl)

    def test_pop(self):
        sl = Skiplist(john='Snow')
        self.assertEqual(sl.pop('john'), 'Snow')
        # Popping a missing key must raise KeyError, matching dict.pop().
        self.assertRaises(KeyError, lambda: sl.pop('Sansa'))

    def test_iteritems(self):
        sl = Skiplist(one=1, two=2)
        self.assertListEqual(sorted([('one', 1), ('two', 2)]),
                             sorted(sl.iteritems()))
class SkipListTestCase(unittest.TestCase):
    """Exercise Skiplist construction, mutation and representation."""

    def test_insert(self):
        skiplist = Skiplist()
        skiplist._insert(1, 1)
        self.assertEqual(skiplist[1], 1)

    def test_update(self):
        skiplist = Skiplist()
        skiplist['foo'] = 'bar'
        self.assertEqual(skiplist['foo'], 'bar')
        # Re-assigning an existing key must overwrite the old value.
        skiplist['foo'] = 'baz'
        self.assertEqual(skiplist['foo'], 'baz')

    def test_remove(self):
        skiplist = Skiplist()
        skiplist['what'] = 'that'
        self.assertTrue(skiplist['what'])
        del skiplist['what']
        # Both lookup and explicit removal of a missing key raise KeyError.
        self.assertRaises(KeyError, lambda: skiplist['what'])
        self.assertRaises(KeyError, lambda: skiplist._remove('not here'))

    def test_init(self):
        skiplist = Skiplist(a=1, b=2)
        self.assertEqual(skiplist['a'], 1)
        self.assertEqual(skiplist['b'], 2)
        self.assertEqual(len(skiplist), 2)

    def test_str(self):
        skiplist = Skiplist()
        self.assertEqual('skiplist({})', str(skiplist))
        skiplist['1'] = 1
        self.assertEqual('skiplist({1: 1})', str(skiplist))

    def test_589(self):
        # Regression scenario: out-of-order inserts with a fixed-height
        # level distribution must not raise.
        skiplist = Skiplist()
        skiplist.distribution = uniform(2)
        for key in (10, 2, 3):
            skiplist[key] = key
        self.assertTrue(True)
# Allow running this test module directly: python functional_tests.py
if __name__ == '__main__':
    unittest.main()
|
import unittest
import collections
from py_skiplist.iterators import uniform
from py_skiplist.skiplist import Skiplist
class InterfaceTestCase(unittest.TestCase):
def test_interface_methods_set(self):
self.assertTrue(issubclass(Skiplist, collections.MutableMapping),
msg='Skiplist should alway implement the MutableMapping interface')
def test_get(self):
sl = Skiplist(foo='bar')
self.assertEqual(sl.get('foo'), 'bar')
self.assertEqual(sl.get('None', 'baz'), 'baz')
self.assertIsNone(sl.get('Nothing'))
def test_contains(self):
sl = Skiplist(one=1)
self.assertIn('one', sl)
self.assertNotIn('two', sl)
def test_pop(self):
sl = Skiplist(john='Snow')
self.assertEqual(sl.pop('john'), 'Snow')
self.assertRaises(KeyError, lambda: sl.pop('Sansa'))
def test_iteritems(self):
sl = Skiplist(one=1, two=2)
self.assertListEqual(sorted([('one', 1), ('two', 2)]),
sorted(sl.iteritems()))
class SkipListTestCase(unittest.TestCase):
def test_insert(self):
sl = Skiplist()
sl._insert(1, 1)
e = sl[1]
self.assertEqual(e, 1)
def test_update(self):
sl = Skiplist()
sl['foo'] = 'bar'
self.assertEqual(sl['foo'], 'bar')
sl['foo'] = 'baz'
self.assertEqual(sl['foo'], 'baz')
def test_remove(self):
sl = Skiplist()
sl['what'] = 'that'
self.assertTrue(sl['what'])
del sl['what']
self.assertRaises(KeyError, lambda: sl['what'])
self.assertRaises(KeyError, lambda: sl._remove('not here'))
def test_init(self):
sl = Skiplist(a=1, b=2)
self.assertEqual(sl['a'], 1)
self.assertEqual(sl['b'], 2)
self.assertEqual(len(sl), 2)
def test_str(self):
sl = Skiplist()
self.assertEqual('skiplist({})', str(sl))
sl['1'] = 1
self.assertEqual('skiplist({1: 1})', str(sl))
def test_589(self):
sl = Skiplist()
sl.distribution = uniform(2)
sl[10] = 10
sl[2] = 2
sl[3] = 3
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
|
none
| 1
| 2.833649
| 3
|
|
tests/core/commands/test_cmd_add.py
|
Starz0r/pytuber
| 8
|
6625552
|
from unittest import mock
from pytuber import cli
from pytuber.core.commands.cmd_add import (
create_playlist,
parse_jspf,
parse_m3u,
parse_text,
parse_xspf,
)
from pytuber.core.models import PlaylistManager, PlaylistType, Provider
from tests.utils import CommandTestCase, PlaylistFixture
class CommandAddTests(CommandTestCase):
    """CLI-level tests for the ``pytuber add`` command group."""

    @mock.patch("click.edit")
    @mock.patch("pytuber.core.commands.cmd_add.create_playlist")
    @mock.patch("pytuber.core.commands.cmd_add.parse_text")
    def test_add_from_editor(self, parse_text, create_playlist, clk_edit):
        # mock.patch injects bottom-up: parse_text, create_playlist, clk_edit.
        clk_edit.return_value = "foo"
        parse_text.return_value = ["a", "b"]
        self.runner.invoke(
            cli, ["add", "editor", "--title", "My Cool Playlist"]
        )
        # The editor buffer is parsed and forwarded to create_playlist.
        parse_text.assert_called_once_with("foo")
        create_playlist.assert_called_once_with(
            arguments={"_title": "My Cool Playlist"},
            title="My Cool Playlist",
            tracks=["a", "b"],
            type=PlaylistType.EDITOR,
        )

    @mock.patch("pytuber.core.commands.cmd_add.parse_m3u")
    @mock.patch("pytuber.core.commands.cmd_add.parse_xspf")
    @mock.patch("pytuber.core.commands.cmd_add.parse_jspf")
    @mock.patch("pytuber.core.commands.cmd_add.parse_text")
    @mock.patch("pytuber.core.commands.cmd_add.create_playlist")
    def test_add_from_file(self, create_playlist, *args):
        # Remaining mocks also arrive bottom-up: text, jspf, xspf, m3u.
        text, jspf, xspf, m3u = args
        text.return_value = list("txt")
        jspf.return_value = list("jspf")
        xspf.return_value = list("xspf")
        m3u.return_value = list("m3u")
        with self.runner.isolated_filesystem():
            # One input file per supported format; each file's body is just
            # its own extension so the parser argument is recognisable.
            for format in ["txt", "jspf", "xspf", "m3u"]:
                with open("hello.{}".format(format), "w") as f:
                    f.write(format)
                self.runner.invoke(
                    cli,
                    [
                        "add",
                        "file",
                        "hello.{}".format(format),
                        "--title",
                        "Mew",
                        "--format",
                        format,
                    ],
                )
        # Each parser must be dispatched exactly once with its file's text.
        jspf.assert_called_once_with("jspf")
        xspf.assert_called_once_with("xspf")
        text.assert_called_once_with("txt")
        m3u.assert_called_once_with("m3u")
        create_playlist.assert_has_calls(
            [
                mock.call(
                    arguments={"_file": "hello.txt"},
                    title="Mew",
                    tracks=list("txt"),
                    type=PlaylistType.FILE,
                ),
                mock.call(
                    arguments={"_file": "hello.jspf"},
                    title="Mew",
                    tracks=list("jspf"),
                    type=PlaylistType.FILE,
                ),
                mock.call(
                    arguments={"_file": "hello.xspf"},
                    title="Mew",
                    tracks=list("xspf"),
                    type=PlaylistType.FILE,
                ),
                mock.call(
                    arguments={"_file": "hello.m3u"},
                    title="Mew",
                    tracks=list("m3u"),
                    type=PlaylistType.FILE,
                ),
            ]
        )
class CommandAddUtilsTests(CommandTestCase):
    """Unit tests for the playlist parsers and the create_playlist helper."""

    def test_parse_text(self):
        # Valid lines look like "Artist - Title"; comments ("#"), blank
        # lines and malformed lines are dropped, duplicates collapsed.
        text = "\n".join(
            (
                "Queen - Bohemian Rhapsody",
                " Queen - Bohemian Rhapsody",
                "Queen -I want to break free",
                " # a - b",
                " ",
                "Wrong Format",
            )
        )
        expected = [
            ("Queen", "Bohemian Rhapsody"),
            ("Queen", "I want to break free"),
        ]
        self.assertEqual(expected, parse_text(text))

    def test_parse_xspf(self):
        # Tracks missing either creator or title are skipped; an empty
        # document parses to an empty track list.
        xml = """<?xml version="1.0" encoding="UTF-8"?>
<playlist version="1" xmlns="http://xspf.org/ns/0/">
<trackList>
<track>
<creator>Queen</creator>
<title>Bohemian Rhapsody</title>
</track>
<track>
<creator>Queen</creator>
<title>Bohemian Rhapsody</title>
</track>
<track>
<creator>Queen</creator>
<title>I want to break free</title>
</track>
<track>
<creator>No track</creator>
</track>
<track>
<title>No artist</title>
</track>
</trackList>
</playlist>"""
        expected = [
            ("Queen", "Bohemian Rhapsody"),
            ("Queen", "I want to break free"),
        ]
        self.assertEqual(expected, parse_xspf(xml))
        self.assertEqual([], parse_xspf(""))

    def test_parse_jspf(self):
        # NOTE(review): local name shadows the stdlib ``json`` module;
        # harmless here since the module is not used in this method.
        json = """
{
"playlist": {
"title": "Two Songs From Thriller",
"creator": "<NAME>",
"track": [
{
"title": "Bohemian Rhapsody",
"creator": "Queen"
},
{
"title": "Bohemian Rhapsody",
"creator": "Queen"
},
{
"creator": "Queen"
},
{
"title": "Bohemian Rhapsody"
},
{
"title": "I want to break free",
"creator": "Queen"
}
]
}
}"""
        expected = [
            ("Queen", "Bohemian Rhapsody"),
            ("Queen", "I want to break free"),
        ]
        self.assertEqual(expected, parse_jspf(json))
        self.assertEqual([], parse_jspf(""))

    def test_parse_m3u(self):
        # Only "#EXTINF:<secs>,Artist - Title" entries are accepted; the
        # header and malformed EXTINF lines are ignored.
        text = "\n".join(
            (
                "#EXTM3U",
                "#EXTINF:123, Queen - Bohemian Rhapsody",
                "#EXTINF:123, Queen - Bohemian Rhapsody",
                "#EXTINF:123,Queen - I want to break free",
                "#EXTINF:123 whatever format 1",
                "#EXTINF:123, whatever format 2",
            )
        )
        expected = [
            ("Queen", "Bohemian Rhapsody"),
            ("Queen", "I want to break free"),
        ]
        self.assertEqual(expected, parse_m3u(text))
        self.assertEqual([], parse_m3u(""))

    @mock.patch("pytuber.core.commands.cmd_add.magenta")
    @mock.patch.object(PlaylistManager, "set")
    @mock.patch("click.confirm")
    @mock.patch("click.secho")
    @mock.patch("click.clear")
    def test_create_playlist(self, clear, secho, confirm, set, magenta):
        # Mocks are injected bottom-up: clear, secho, confirm, set, magenta.
        magenta.side_effect = lambda x: x  # pass text through uncoloured
        set.return_value = PlaylistFixture.one()
        tracks = [
            ("Queen", "Bohemian Rhapsody"),
            ("Queen", "I want to break free"),
        ]
        create_playlist(
            title="My Cool Playlist",
            tracks=tracks,
            type="foo",
            arguments=dict(foo="bar"),
        )
        # First secho call prints the preview table, second the result.
        expected_ouput = (
            "Title: My Cool Playlist",
            "Tracks: 2",
            "",
            " No Artist Track Name",
            "---- -------- --------------------",
            " 1 Queen Bohemian Rhapsody",
            " 2 Queen I want to break free",
        )
        self.assertOutput(expected_ouput, secho.call_args_list[0][0][0])
        self.assertEqual(
            "Added playlist: id_a!", secho.call_args_list[1][0][0]
        )
        clear.assert_called_once_with()
        confirm.assert_called_once_with(
            "Are you sure you want to save this playlist?", abort=True
        )
        set.assert_called_once_with(
            dict(
                type="foo",
                arguments=dict(foo="bar"),
                provider=Provider.user,
                title="My Cool Playlist",
                tracks=["55a4d2b", "b045fee"],
            )
        )

    @mock.patch("click.secho")
    def test_create_playlist_empty_tracks(self, secho):
        # With no tracks the command aborts early and only prints a notice.
        create_playlist(title="foo", tracks=[], type=None, arguments=None)
        secho.assert_called_once_with("Tracklist is empty, aborting...")
|
from unittest import mock
from pytuber import cli
from pytuber.core.commands.cmd_add import (
create_playlist,
parse_jspf,
parse_m3u,
parse_text,
parse_xspf,
)
from pytuber.core.models import PlaylistManager, PlaylistType, Provider
from tests.utils import CommandTestCase, PlaylistFixture
class CommandAddTests(CommandTestCase):
@mock.patch("click.edit")
@mock.patch("pytuber.core.commands.cmd_add.create_playlist")
@mock.patch("pytuber.core.commands.cmd_add.parse_text")
def test_add_from_editor(self, parse_text, create_playlist, clk_edit):
clk_edit.return_value = "foo"
parse_text.return_value = ["a", "b"]
self.runner.invoke(
cli, ["add", "editor", "--title", "My Cool Playlist"]
)
parse_text.assert_called_once_with("foo")
create_playlist.assert_called_once_with(
arguments={"_title": "My Cool Playlist"},
title="My Cool Playlist",
tracks=["a", "b"],
type=PlaylistType.EDITOR,
)
@mock.patch("pytuber.core.commands.cmd_add.parse_m3u")
@mock.patch("pytuber.core.commands.cmd_add.parse_xspf")
@mock.patch("pytuber.core.commands.cmd_add.parse_jspf")
@mock.patch("pytuber.core.commands.cmd_add.parse_text")
@mock.patch("pytuber.core.commands.cmd_add.create_playlist")
def test_add_from_file(self, create_playlist, *args):
text, jspf, xspf, m3u = args
text.return_value = list("txt")
jspf.return_value = list("jspf")
xspf.return_value = list("xspf")
m3u.return_value = list("m3u")
with self.runner.isolated_filesystem():
for format in ["txt", "jspf", "xspf", "m3u"]:
with open("hello.{}".format(format), "w") as f:
f.write(format)
self.runner.invoke(
cli,
[
"add",
"file",
"hello.{}".format(format),
"--title",
"Mew",
"--format",
format,
],
)
jspf.assert_called_once_with("jspf")
xspf.assert_called_once_with("xspf")
text.assert_called_once_with("txt")
m3u.assert_called_once_with("m3u")
create_playlist.assert_has_calls(
[
mock.call(
arguments={"_file": "hello.txt"},
title="Mew",
tracks=list("txt"),
type=PlaylistType.FILE,
),
mock.call(
arguments={"_file": "hello.jspf"},
title="Mew",
tracks=list("jspf"),
type=PlaylistType.FILE,
),
mock.call(
arguments={"_file": "hello.xspf"},
title="Mew",
tracks=list("xspf"),
type=PlaylistType.FILE,
),
mock.call(
arguments={"_file": "hello.m3u"},
title="Mew",
tracks=list("m3u"),
type=PlaylistType.FILE,
),
]
)
class CommandAddUtilsTests(CommandTestCase):
def test_parse_text(self):
text = "\n".join(
(
"Queen - Bohemian Rhapsody",
" Queen - Bohemian Rhapsody",
"Queen -I want to break free",
" # a - b",
" ",
"Wrong Format",
)
)
expected = [
("Queen", "Bohemian Rhapsody"),
("Queen", "I want to break free"),
]
self.assertEqual(expected, parse_text(text))
def test_parse_xspf(self):
xml = """<?xml version="1.0" encoding="UTF-8"?>
<playlist version="1" xmlns="http://xspf.org/ns/0/">
<trackList>
<track>
<creator>Queen</creator>
<title>Bohemian Rhapsody</title>
</track>
<track>
<creator>Queen</creator>
<title>Bohemian Rhapsody</title>
</track>
<track>
<creator>Queen</creator>
<title>I want to break free</title>
</track>
<track>
<creator>No track</creator>
</track>
<track>
<title>No artist</title>
</track>
</trackList>
</playlist>"""
expected = [
("Queen", "Bohemian Rhapsody"),
("Queen", "I want to break free"),
]
self.assertEqual(expected, parse_xspf(xml))
self.assertEqual([], parse_xspf(""))
def test_parse_jspf(self):
json = """
{
"playlist": {
"title": "Two Songs From Thriller",
"creator": "<NAME>",
"track": [
{
"title": "Bohemian Rhapsody",
"creator": "Queen"
},
{
"title": "Bohemian Rhapsody",
"creator": "Queen"
},
{
"creator": "Queen"
},
{
"title": "Bohemian Rhapsody"
},
{
"title": "I want to break free",
"creator": "Queen"
}
]
}
}"""
expected = [
("Queen", "Bohemian Rhapsody"),
("Queen", "I want to break free"),
]
self.assertEqual(expected, parse_jspf(json))
self.assertEqual([], parse_jspf(""))
def test_parse_m3u(self):
text = "\n".join(
(
"#EXTM3U",
"#EXTINF:123, Queen - Bohemian Rhapsody",
"#EXTINF:123, Queen - Bohemian Rhapsody",
"#EXTINF:123,Queen - I want to break free",
"#EXTINF:123 whatever format 1",
"#EXTINF:123, whatever format 2",
)
)
expected = [
("Queen", "Bohemian Rhapsody"),
("Queen", "I want to break free"),
]
self.assertEqual(expected, parse_m3u(text))
self.assertEqual([], parse_m3u(""))
@mock.patch("pytuber.core.commands.cmd_add.magenta")
@mock.patch.object(PlaylistManager, "set")
@mock.patch("click.confirm")
@mock.patch("click.secho")
@mock.patch("click.clear")
def test_create_playlist(self, clear, secho, confirm, set, magenta):
magenta.side_effect = lambda x: x
set.return_value = PlaylistFixture.one()
tracks = [
("Queen", "Bohemian Rhapsody"),
("Queen", "I want to break free"),
]
create_playlist(
title="My Cool Playlist",
tracks=tracks,
type="foo",
arguments=dict(foo="bar"),
)
expected_ouput = (
"Title: My Cool Playlist",
"Tracks: 2",
"",
" No Artist Track Name",
"---- -------- --------------------",
" 1 Queen Bohemian Rhapsody",
" 2 Queen I want to break free",
)
self.assertOutput(expected_ouput, secho.call_args_list[0][0][0])
self.assertEqual(
"Added playlist: id_a!", secho.call_args_list[1][0][0]
)
clear.assert_called_once_with()
confirm.assert_called_once_with(
"Are you sure you want to save this playlist?", abort=True
)
set.assert_called_once_with(
dict(
type="foo",
arguments=dict(foo="bar"),
provider=Provider.user,
title="My Cool Playlist",
tracks=["55a4d2b", "b045fee"],
)
)
@mock.patch("click.secho")
def test_create_playlist_empty_tracks(self, secho):
create_playlist(title="foo", tracks=[], type=None, arguments=None)
secho.assert_called_once_with("Tracklist is empty, aborting...")
|
en
| 0.432059
|
# a - b", <?xml version="1.0" encoding="UTF-8"?> <playlist version="1" xmlns="http://xspf.org/ns/0/"> <trackList> <track> <creator>Queen</creator> <title>Bohemian Rhapsody</title> </track> <track> <creator>Queen</creator> <title>Bohemian Rhapsody</title> </track> <track> <creator>Queen</creator> <title>I want to break free</title> </track> <track> <creator>No track</creator> </track> <track> <title>No artist</title> </track> </trackList> </playlist> { "playlist": { "title": "Two Songs From Thriller", "creator": "<NAME>", "track": [ { "title": "Bohemian Rhapsody", "creator": "Queen" }, { "title": "Bohemian Rhapsody", "creator": "Queen" }, { "creator": "Queen" }, { "title": "Bohemian Rhapsody" }, { "title": "I want to break free", "creator": "Queen" } ] } }
| 2.606499
| 3
|
DjangoWebProject4/__init__.py
|
sonalnikam/P
| 0
|
6625553
|
<gh_stars>0
"""
Package for DjangoWebProject4.
"""
|
"""
Package for DjangoWebProject4.
"""
|
en
| 0.482155
|
Package for DjangoWebProject4.
| 1.028058
| 1
|
tests/test_boot.py
|
cu2/aldebaran
| 4
|
6625554
|
import unittest
from unittest.mock import Mock, patch
from utils import boot
from utils import executable
class TestBootImage(unittest.TestCase):
    """Tests for boot.BootImage: byte/word writes and file round-trips."""

    def setUp(self):
        # Hand-built context-manager mock standing in for open()'s return
        # value; __enter__ is pointed at a MockFile inside each test that
        # patches builtins.open.
        self.mock_mgr = Mock()
        self.mock_mgr.__enter__ = Mock()
        self.mock_mgr.__exit__ = Mock()

    def test_write_byte(self):
        boot_image = boot.BootImage(4)
        self.assertListEqual(boot_image.content, [0, 0, 0, 0])
        boot_image.write_byte(2, 0xFF)
        self.assertListEqual(boot_image.content, [0, 0, 0xFF, 0])

    def test_write_word(self):
        # A 16-bit word lands big-endian across two consecutive bytes.
        boot_image = boot.BootImage(4)
        self.assertListEqual(boot_image.content, [0, 0, 0, 0])
        boot_image.write_word(1, 0x1234)
        self.assertListEqual(boot_image.content, [0, 0x12, 0x34, 0])

    @patch('builtins.open')
    def test_save(self, mock_open):
        mock_file = MockFile([])
        self.mock_mgr.__enter__.return_value = mock_file
        mock_open.return_value = self.mock_mgr
        boot_image = boot.BootImage(4)
        boot_image.content = [0, 1, 2, 3]
        self.assertListEqual(mock_file.new_content, [])
        boot_image.save('nofile')
        # save() writes the image bytes and exits the with-block cleanly
        # (no exception triple passed to __exit__).
        self.assertListEqual(mock_file.new_content, [0, 1, 2, 3])
        self.assertTupleEqual(self.mock_mgr.__exit__.call_args_list[0][0], (None, None, None))

    @patch('builtins.open')
    def test_load(self, mock_open):
        mock_file = MockFile([4, 5, 6, 7])
        self.mock_mgr.__enter__.return_value = mock_file
        mock_open.return_value = self.mock_mgr
        boot_image = boot.BootImage(2)
        self.assertEqual(boot_image.size, 2)
        self.assertListEqual(boot_image.content, [0, 0])
        boot_image.load('nofile')
        # load() replaces both size and content with the file's bytes.
        self.assertEqual(boot_image.size, 4)
        self.assertListEqual(boot_image.content, [4, 5, 6, 7])
        self.assertTupleEqual(self.mock_mgr.__exit__.call_args_list[0][0], (None, None, None))
class TestBootLoader(unittest.TestCase):
    """Tests for boot.BootLoader copying images/executables into RAM."""

    def setUp(self):
        # RAM is mocked; the loader's behaviour is asserted via the
        # recorded write_byte calls.
        self.ram = Mock()

    def test_load_image(self):
        # Four big-endian words -> eight bytes written in order starting
        # at base address 5 (addresses 5..12).
        boot_image = boot.BootImage(8)
        boot_image.write_word(0, 0x0123)
        boot_image.write_word(2, 0x4567)
        boot_image.write_word(4, 0x89AB)
        boot_image.write_word(6, 0xCDEF)
        boot_loader = boot.BootLoader(self.ram)
        boot_loader.load_image(5, boot_image)
        self.assertEqual(self.ram.write_byte.call_count, 8)
        write_byte_calls = self.ram.write_byte.call_args_list
        self.assertTupleEqual(write_byte_calls[0][0], (5, 0x01))
        self.assertTupleEqual(write_byte_calls[1][0], (6, 0x23))
        self.assertTupleEqual(write_byte_calls[2][0], (7, 0x45))
        self.assertTupleEqual(write_byte_calls[3][0], (8, 0x67))
        self.assertTupleEqual(write_byte_calls[4][0], (9, 0x89))
        self.assertTupleEqual(write_byte_calls[5][0], (10, 0xAB))
        self.assertTupleEqual(write_byte_calls[6][0], (11, 0xCD))
        self.assertTupleEqual(write_byte_calls[7][0], (12, 0xEF))

    def test_load_executable(self):
        # Opcode bytes are copied verbatim starting at the base address.
        boot_exe = executable.Executable(
            version=1,
            opcode=[0x12, 0x34, 0x56],
        )
        boot_loader = boot.BootLoader(self.ram)
        boot_loader.load_executable(5, boot_exe)
        self.assertEqual(self.ram.write_byte.call_count, 3)
        write_byte_calls = self.ram.write_byte.call_args_list
        self.assertTupleEqual(write_byte_calls[0][0], (5, 0x12))
        self.assertTupleEqual(write_byte_calls[1][0], (6, 0x34))
        self.assertTupleEqual(write_byte_calls[2][0], (7, 0x56))
class MockFile:
    """In-memory stand-in for a file object opened in binary mode."""

    def __init__(self, content):
        # Bytes the fake file serves via read(), kept as a list of ints.
        self.content = content
        # Captures the most recent write(), also as a list of ints.
        self.new_content = []

    def read(self):
        # Serve the preset payload as an immutable bytes object.
        return bytes(self.content)

    def write(self, data):
        # Record what was written instead of touching the filesystem.
        self.new_content = list(data)
|
import unittest
from unittest.mock import Mock, patch
from utils import boot
from utils import executable
class TestBootImage(unittest.TestCase):
def setUp(self):
self.mock_mgr = Mock()
self.mock_mgr.__enter__ = Mock()
self.mock_mgr.__exit__ = Mock()
def test_write_byte(self):
boot_image = boot.BootImage(4)
self.assertListEqual(boot_image.content, [0, 0, 0, 0])
boot_image.write_byte(2, 0xFF)
self.assertListEqual(boot_image.content, [0, 0, 0xFF, 0])
def test_write_word(self):
boot_image = boot.BootImage(4)
self.assertListEqual(boot_image.content, [0, 0, 0, 0])
boot_image.write_word(1, 0x1234)
self.assertListEqual(boot_image.content, [0, 0x12, 0x34, 0])
@patch('builtins.open')
def test_save(self, mock_open):
mock_file = MockFile([])
self.mock_mgr.__enter__.return_value = mock_file
mock_open.return_value = self.mock_mgr
boot_image = boot.BootImage(4)
boot_image.content = [0, 1, 2, 3]
self.assertListEqual(mock_file.new_content, [])
boot_image.save('nofile')
self.assertListEqual(mock_file.new_content, [0, 1, 2, 3])
self.assertTupleEqual(self.mock_mgr.__exit__.call_args_list[0][0], (None, None, None))
@patch('builtins.open')
def test_load(self, mock_open):
mock_file = MockFile([4, 5, 6, 7])
self.mock_mgr.__enter__.return_value = mock_file
mock_open.return_value = self.mock_mgr
boot_image = boot.BootImage(2)
self.assertEqual(boot_image.size, 2)
self.assertListEqual(boot_image.content, [0, 0])
boot_image.load('nofile')
self.assertEqual(boot_image.size, 4)
self.assertListEqual(boot_image.content, [4, 5, 6, 7])
self.assertTupleEqual(self.mock_mgr.__exit__.call_args_list[0][0], (None, None, None))
class TestBootLoader(unittest.TestCase):
def setUp(self):
self.ram = Mock()
def test_load_image(self):
boot_image = boot.BootImage(8)
boot_image.write_word(0, 0x0123)
boot_image.write_word(2, 0x4567)
boot_image.write_word(4, 0x89AB)
boot_image.write_word(6, 0xCDEF)
boot_loader = boot.BootLoader(self.ram)
boot_loader.load_image(5, boot_image)
self.assertEqual(self.ram.write_byte.call_count, 8)
write_byte_calls = self.ram.write_byte.call_args_list
self.assertTupleEqual(write_byte_calls[0][0], (5, 0x01))
self.assertTupleEqual(write_byte_calls[1][0], (6, 0x23))
self.assertTupleEqual(write_byte_calls[2][0], (7, 0x45))
self.assertTupleEqual(write_byte_calls[3][0], (8, 0x67))
self.assertTupleEqual(write_byte_calls[4][0], (9, 0x89))
self.assertTupleEqual(write_byte_calls[5][0], (10, 0xAB))
self.assertTupleEqual(write_byte_calls[6][0], (11, 0xCD))
self.assertTupleEqual(write_byte_calls[7][0], (12, 0xEF))
def test_load_executable(self):
boot_exe = executable.Executable(
version=1,
opcode=[0x12, 0x34, 0x56],
)
boot_loader = boot.BootLoader(self.ram)
boot_loader.load_executable(5, boot_exe)
self.assertEqual(self.ram.write_byte.call_count, 3)
write_byte_calls = self.ram.write_byte.call_args_list
self.assertTupleEqual(write_byte_calls[0][0], (5, 0x12))
self.assertTupleEqual(write_byte_calls[1][0], (6, 0x34))
self.assertTupleEqual(write_byte_calls[2][0], (7, 0x56))
class MockFile:
def __init__(self, content):
self.content = content
self.new_content = []
def read(self):
return bytes(self.content)
def write(self, new_content_bytes):
self.new_content = list(new_content_bytes)
|
none
| 1
| 2.782064
| 3
|
|
tests/test_ssl.py
|
AvitalFineRedis/redis-py
| 0
|
6625555
|
import os
import socket
import ssl
from urllib.parse import urlparse
import pytest
import redis
from redis.exceptions import ConnectionError, RedisError
from .conftest import skip_if_cryptography, skip_if_nocryptography
@pytest.mark.ssl
class TestSSL:
"""Tests for SSL connections
This relies on the --redis-ssl-url purely for rebuilding the client
and connecting to the appropriate port.
"""
ROOT = os.path.join(os.path.dirname(__file__), "..")
CERT_DIR = os.path.abspath(os.path.join(ROOT, "docker", "stunnel", "keys"))
if not os.path.isdir(CERT_DIR): # github actions package validation case
CERT_DIR = os.path.abspath(
os.path.join(ROOT, "..", "docker", "stunnel", "keys")
)
if not os.path.isdir(CERT_DIR):
raise IOError(f"No SSL certificates found. They should be in {CERT_DIR}")
SERVER_CERT = os.path.join(CERT_DIR, "server-cert.pem")
SERVER_KEY = os.path.join(CERT_DIR, "server-key.pem")
def test_ssl_with_invalid_cert(self, request):
ssl_url = request.config.option.redis_ssl_url
sslclient = redis.from_url(ssl_url)
with pytest.raises(ConnectionError) as e:
sslclient.ping()
assert "SSL: CERTIFICATE_VERIFY_FAILED" in str(e)
def test_ssl_connection(self, request):
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(host=p[0], port=p[1], ssl=True, ssl_cert_reqs="none")
assert r.ping()
def test_ssl_connection_without_ssl(self, request):
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(host=p[0], port=p[1], ssl=False)
with pytest.raises(ConnectionError) as e:
r.ping()
assert "Connection closed by server" in str(e)
def test_validating_self_signed_certificate(self, request):
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_certfile=self.SERVER_CERT,
ssl_keyfile=self.SERVER_KEY,
ssl_cert_reqs="required",
ssl_ca_certs=self.SERVER_CERT,
)
assert r.ping()
def _create_oscp_conn(self, request):
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_certfile=self.SERVER_CERT,
ssl_keyfile=self.SERVER_KEY,
ssl_cert_reqs="required",
ssl_ca_certs=self.SERVER_CERT,
ssl_validate_ocsp=True,
)
return r
@skip_if_cryptography()
def test_ssl_ocsp_called(self, request):
r = self._create_oscp_conn(request)
with pytest.raises(RedisError) as e:
assert r.ping()
assert "cryptography not installed" in str(e)
@skip_if_nocryptography()
def test_ssl_ocsp_called_withcrypto(self, request):
r = self._create_oscp_conn(request)
with pytest.raises(ConnectionError) as e:
assert r.ping()
assert "No AIA information present in ssl certificate" in str(e)
# rediss://, url based
ssl_url = request.config.option.redis_ssl_url
sslclient = redis.from_url(ssl_url)
with pytest.raises(ConnectionError) as e:
sslclient.ping()
assert "No AIA information present in ssl certificate" in str(e)
@skip_if_nocryptography()
def test_valid_ocsp_cert_http(self):
from redis.ocsp import OCSPVerifier
hostnames = ["github.com", "aws.amazon.com", "ynet.co.il", "microsoft.com"]
for hostname in hostnames:
context = ssl.create_default_context()
with socket.create_connection((hostname, 443)) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
ocsp = OCSPVerifier(wrapped, hostname, 443)
assert ocsp.is_valid()
@skip_if_nocryptography()
def test_revoked_ocsp_certificate(self):
from redis.ocsp import OCSPVerifier
context = ssl.create_default_context()
hostname = "revoked.badssl.com"
with socket.create_connection((hostname, 443)) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
ocsp = OCSPVerifier(wrapped, hostname, 443)
assert ocsp.is_valid() is False
@skip_if_nocryptography()
def test_unauthorized_ocsp(self):
from redis.ocsp import OCSPVerifier
context = ssl.create_default_context()
hostname = "stackoverflow.com"
with socket.create_connection((hostname, 443)) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
ocsp = OCSPVerifier(wrapped, hostname, 443)
with pytest.raises(ConnectionError):
ocsp.is_valid()
@skip_if_nocryptography()
def test_ocsp_not_present_in_response(self):
from redis.ocsp import OCSPVerifier
context = ssl.create_default_context()
hostname = "google.co.il"
with socket.create_connection((hostname, 443)) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
ocsp = OCSPVerifier(wrapped, hostname, 443)
assert ocsp.is_valid() is False
@skip_if_nocryptography()
def test_unauthorized_then_direct(self):
from redis.ocsp import OCSPVerifier
# these certificates on the socket end return unauthorized
# then the second call succeeds
hostnames = ["wikipedia.org", "squarespace.com"]
for hostname in hostnames:
context = ssl.create_default_context()
with socket.create_connection((hostname, 443)) as sock:
with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
ocsp = OCSPVerifier(wrapped, hostname, 443)
assert ocsp.is_valid()
@skip_if_nocryptography()
def test_mock_ocsp_staple(self, request):
import OpenSSL
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_certfile=self.SERVER_CERT,
ssl_keyfile=self.SERVER_KEY,
ssl_cert_reqs="required",
ssl_ca_certs=self.SERVER_CERT,
ssl_validate_ocsp=True,
ssl_ocsp_context=p, # just needs to not be none
)
with pytest.raises(RedisError):
r.ping()
ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
ctx.use_certificate_file(self.SERVER_CERT)
ctx.use_privatekey_file(self.SERVER_KEY)
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_certfile=self.SERVER_CERT,
ssl_keyfile=self.SERVER_KEY,
ssl_cert_reqs="required",
ssl_ca_certs=self.SERVER_CERT,
ssl_ocsp_context=ctx,
ssl_ocsp_expected_cert=open(self.SERVER_KEY, "rb").read(),
ssl_validate_ocsp_stapled=True,
)
with pytest.raises(ConnectionError) as e:
r.ping()
assert "no ocsp response present" in str(e)
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_certfile=self.SERVER_CERT,
ssl_keyfile=self.SERVER_KEY,
ssl_cert_reqs="required",
ssl_ca_certs=self.SERVER_CERT,
ssl_validate_ocsp_stapled=True,
)
with pytest.raises(ConnectionError) as e:
r.ping()
assert "no ocsp response present" in str(e)
|
import os
import socket
import ssl
from urllib.parse import urlparse
import pytest
import redis
from redis.exceptions import ConnectionError, RedisError
from .conftest import skip_if_cryptography, skip_if_nocryptography
@pytest.mark.ssl
class TestSSL:
"""Tests for SSL connections
This relies on the --redis-ssl-url purely for rebuilding the client
and connecting to the appropriate port.
"""
ROOT = os.path.join(os.path.dirname(__file__), "..")
CERT_DIR = os.path.abspath(os.path.join(ROOT, "docker", "stunnel", "keys"))
if not os.path.isdir(CERT_DIR): # github actions package validation case
CERT_DIR = os.path.abspath(
os.path.join(ROOT, "..", "docker", "stunnel", "keys")
)
if not os.path.isdir(CERT_DIR):
raise IOError(f"No SSL certificates found. They should be in {CERT_DIR}")
SERVER_CERT = os.path.join(CERT_DIR, "server-cert.pem")
SERVER_KEY = os.path.join(CERT_DIR, "server-key.pem")
def test_ssl_with_invalid_cert(self, request):
ssl_url = request.config.option.redis_ssl_url
sslclient = redis.from_url(ssl_url)
with pytest.raises(ConnectionError) as e:
sslclient.ping()
assert "SSL: CERTIFICATE_VERIFY_FAILED" in str(e)
def test_ssl_connection(self, request):
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(host=p[0], port=p[1], ssl=True, ssl_cert_reqs="none")
assert r.ping()
def test_ssl_connection_without_ssl(self, request):
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(host=p[0], port=p[1], ssl=False)
with pytest.raises(ConnectionError) as e:
r.ping()
assert "Connection closed by server" in str(e)
def test_validating_self_signed_certificate(self, request):
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_certfile=self.SERVER_CERT,
ssl_keyfile=self.SERVER_KEY,
ssl_cert_reqs="required",
ssl_ca_certs=self.SERVER_CERT,
)
assert r.ping()
def _create_oscp_conn(self, request):
ssl_url = request.config.option.redis_ssl_url
p = urlparse(ssl_url)[1].split(":")
r = redis.Redis(
host=p[0],
port=p[1],
ssl=True,
ssl_certfile=self.SERVER_CERT,
ssl_keyfile=self.SERVER_KEY,
ssl_cert_reqs="required",
ssl_ca_certs=self.SERVER_CERT,
ssl_validate_ocsp=True,
)
return r
@skip_if_cryptography()
def test_ssl_ocsp_called(self, request):
r = self._create_oscp_conn(request)
with pytest.raises(RedisError) as e:
assert r.ping()
assert "cryptography not installed" in str(e)
@skip_if_nocryptography()
def test_ssl_ocsp_called_withcrypto(self, request):
r = self._create_oscp_conn(request)
with pytest.raises(ConnectionError) as e:
assert r.ping()
assert "No AIA information present in ssl certificate" in str(e)
# rediss://, url based
ssl_url = request.config.option.redis_ssl_url
sslclient = redis.from_url(ssl_url)
with pytest.raises(ConnectionError) as e:
sslclient.ping()
assert "No AIA information present in ssl certificate" in str(e)
@skip_if_nocryptography()
def test_valid_ocsp_cert_http(self):
    # NOTE(review): depends on live third-party hosts and their OCSP
    # responders being reachable, so this is inherently flaky offline.
    from redis.ocsp import OCSPVerifier
    hostnames = ["github.com", "aws.amazon.com", "ynet.co.il", "microsoft.com"]
    for hostname in hostnames:
        context = ssl.create_default_context()
        with socket.create_connection((hostname, 443)) as sock:
            with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
                ocsp = OCSPVerifier(wrapped, hostname, 443)
                assert ocsp.is_valid()
@skip_if_nocryptography()
def test_revoked_ocsp_certificate(self):
    # badssl.com publishes a deliberately revoked certificate; the
    # verifier must report it invalid. Live-network dependent.
    from redis.ocsp import OCSPVerifier
    context = ssl.create_default_context()
    hostname = "revoked.badssl.com"
    with socket.create_connection((hostname, 443)) as sock:
        with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
            ocsp = OCSPVerifier(wrapped, hostname, 443)
            assert ocsp.is_valid() is False
@skip_if_nocryptography()
def test_unauthorized_ocsp(self):
    # NOTE(review): assumes this host's OCSP responder answers
    # "unauthorized", which surfaces as ConnectionError — live-network
    # dependent and may change if the host rotates certificates.
    from redis.ocsp import OCSPVerifier
    context = ssl.create_default_context()
    hostname = "stackoverflow.com"
    with socket.create_connection((hostname, 443)) as sock:
        with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
            ocsp = OCSPVerifier(wrapped, hostname, 443)
            with pytest.raises(ConnectionError):
                ocsp.is_valid()
@skip_if_nocryptography()
def test_ocsp_not_present_in_response(self):
    # NOTE(review): presumably this host's OCSP response lacks the data
    # the verifier needs, so is_valid() returns False — live-network
    # dependent; verify if the host's certificate chain changes.
    from redis.ocsp import OCSPVerifier
    context = ssl.create_default_context()
    hostname = "google.co.il"
    with socket.create_connection((hostname, 443)) as sock:
        with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
            ocsp = OCSPVerifier(wrapped, hostname, 443)
            assert ocsp.is_valid() is False
@skip_if_nocryptography()
def test_unauthorized_then_direct(self):
    from redis.ocsp import OCSPVerifier
    # these certificates on the socket end return unauthorized
    # then the second call succeeds
    # NOTE(review): live-network dependent — relies on the retry
    # behavior of the verifier against these specific hosts.
    hostnames = ["wikipedia.org", "squarespace.com"]
    for hostname in hostnames:
        context = ssl.create_default_context()
        with socket.create_connection((hostname, 443)) as sock:
            with context.wrap_socket(sock, server_hostname=hostname) as wrapped:
                ocsp = OCSPVerifier(wrapped, hostname, 443)
                assert ocsp.is_valid()
@skip_if_nocryptography()
def test_mock_ocsp_staple(self, request):
    """Exercise stapled-OCSP validation paths against the test server.

    Three phases: (1) OCSP validation with a bogus context object must
    raise RedisError, (2) a pyOpenSSL context plus an expected cert but
    no staple must raise ConnectionError, (3) staple validation alone
    must also raise because the test server sends no OCSP response.
    """
    import OpenSSL

    ssl_url = request.config.option.redis_ssl_url
    p = urlparse(ssl_url)[1].split(":")
    r = redis.Redis(
        host=p[0],
        port=p[1],
        ssl=True,
        ssl_certfile=self.SERVER_CERT,
        ssl_keyfile=self.SERVER_KEY,
        ssl_cert_reqs="required",
        ssl_ca_certs=self.SERVER_CERT,
        ssl_validate_ocsp=True,
        ssl_ocsp_context=p,  # just needs to not be none
    )
    with pytest.raises(RedisError):
        r.ping()

    ctx = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
    ctx.use_certificate_file(self.SERVER_CERT)
    ctx.use_privatekey_file(self.SERVER_KEY)

    # Fix: read the expected certificate bytes via a context manager so
    # the file handle is closed deterministically (was open(...).read()).
    with open(self.SERVER_KEY, "rb") as key_file:
        expected_cert = key_file.read()

    r = redis.Redis(
        host=p[0],
        port=p[1],
        ssl=True,
        ssl_certfile=self.SERVER_CERT,
        ssl_keyfile=self.SERVER_KEY,
        ssl_cert_reqs="required",
        ssl_ca_certs=self.SERVER_CERT,
        ssl_ocsp_context=ctx,
        ssl_ocsp_expected_cert=expected_cert,
        ssl_validate_ocsp_stapled=True,
    )
    with pytest.raises(ConnectionError) as e:
        r.ping()
    assert "no ocsp response present" in str(e)

    r = redis.Redis(
        host=p[0],
        port=p[1],
        ssl=True,
        ssl_certfile=self.SERVER_CERT,
        ssl_keyfile=self.SERVER_KEY,
        ssl_cert_reqs="required",
        ssl_ca_certs=self.SERVER_CERT,
        ssl_validate_ocsp_stapled=True,
    )
    with pytest.raises(ConnectionError) as e:
        r.ping()
    assert "no ocsp response present" in str(e)
|
en
| 0.820904
|
Tests for SSL connections This relies on the --redis-ssl-url purely for rebuilding the client and connecting to the appropriate port. # github actions package validation case # rediss://, url based # these certificates on the socket end return unauthorized # then the second call succeeds # just needs to not be none
| 2.466879
| 2
|
Tic_Tac_Toe.py
|
emiliebutez/Projet_TicTacToe
| 0
|
6625556
|
import math
import random
size = 4
def compterLignes(grille, symbole):
    """Best count of `symbole` on any row still winnable by that player.

    A row containing the opposing symbol cannot be won and scores 0;
    empty cells (0) neither add to nor reset the running count.
    """
    best = 0
    for rangee in grille:
        courant = 0
        for valeur in rangee:
            if valeur == symbole:
                courant += 1
            elif valeur == -symbole:
                courant = 0
                break
        best = max(best, courant)
    return best
def compterColonnes(grille, symbole):
    """Best count of `symbole` on any column still winnable by that player.

    A column containing the opposing symbol scores 0. The board size is
    now derived from the grid itself instead of the module-level `size`
    global, generalizing the function to any square grid while behaving
    identically on the 4x4 boards used in this module.
    """
    n = len(grille)
    maximum = 0
    for x in range(n):
        count = 0
        for y in range(n):
            if grille[y][x] == symbole:
                count += 1
            elif grille[y][x] == -symbole:
                # Opponent blocks this column: unwinnable.
                count = 0
                break
        maximum = max(maximum, count)
    return maximum
def compterDiagonales(grille, symbole):
    """Best count of `symbole` on either diagonal still winnable.

    An opposing symbol resets the corresponding diagonal's count to 0
    (no early exit, matching the original scan). Board size is derived
    from the grid itself rather than the module-level `size` global —
    identical behavior for the 4x4 boards used here.
    """
    n = len(grille)
    diagA = 0  # main diagonal (top-left to bottom-right)
    diagB = 0  # anti-diagonal (top-right to bottom-left)
    for d in range(n):
        if grille[d][d] == symbole:
            diagA += 1
        elif grille[d][d] == -symbole:
            diagA = 0
        if grille[d][n - d - 1] == symbole:
            diagB += 1
        elif grille[d][n - d - 1] == -symbole:
            diagB = 0
    return max(diagA, diagB)
def carre(grille, symbole, position):
    """Count `symbole` inside the 2x2 square whose top-left corner is
    `position` (x, y); returns 0 if the opponent occupies any cell."""
    x0, y0 = position
    tally = {symbole: 0, -symbole: 0, 0: 0}
    for dy in range(2):
        for dx in range(2):
            tally[grille[y0 + dy][x0 + dx]] += 1
    return 0 if tally[-symbole] else tally[symbole]
def compterCarres(grille, symbole):
    """Best `carre` score over every 2x2 square of the grid.

    Board size is derived from the grid itself instead of the
    module-level `size` global (same result on the 4x4 boards used here).
    """
    n = len(grille)
    maximum = 0
    for y in range(n - 1):
        for x in range(n - 1):
            maximum = max(maximum, carre(grille, symbole, (x, y)))
    return maximum
def gagner(grille, symbole):
    """True when `symbole` fully owns a row, column, 2x2 square or diagonal
    (i.e. one of the four scorers reaches `size`)."""
    scores = (
        compterColonnes(grille, symbole),
        compterLignes(grille, symbole),
        compterCarres(grille, symbole),
        compterDiagonales(grille, symbole),
    )
    return max(scores) == size
def finPartie(grille):
    """True when the board has no empty (0) cell left."""
    return all(case != 0 for ligne in grille for case in ligne)
def heuristique(grille, symboleActuel):
    """Score a position from `symboleActuel`'s point of view.

    +inf when the current symbol has won (minimax must pick it),
    -inf when the opponent has won (minimax must avoid it),
    otherwise the best partial row/column/square/diagonal count.
    """
    if gagner(grille, symboleActuel):
        return float('inf')
    if gagner(grille, -symboleActuel):
        return -float('inf')
    return max(
        compterColonnes(grille, symboleActuel),
        compterLignes(grille, symboleActuel),
        compterCarres(grille, symboleActuel),
        compterDiagonales(grille, symboleActuel),
    )
def minimax(fakeGrid, monSymbole, maximiser, profondeur):
    """Plain minimax search (after the French Wikipedia article:
    https://fr.wikipedia.org/wiki/Algorithme_minimax).

    Returns the heuristic immediately on a decided position or when the
    depth budget is exhausted on a non-full board; otherwise recursively
    evaluates every empty cell for the side to move.
    """
    evaluation = heuristique(fakeGrid, monSymbole)
    if abs(evaluation) == float('inf'):
        return evaluation
    if profondeur == 0 and not finPartie(fakeGrid):
        return evaluation
    # Side to move and how its results are folded.
    joueur = monSymbole if maximiser else -monSymbole
    choisir = max if maximiser else min
    meilleur = -float('inf') if maximiser else float('inf')
    for x in range(size):
        for y in range(size):
            if fakeGrid[x][y] != 0:
                continue
            # Play on a copy so the caller's grid is untouched.
            copie = [ligne[:] for ligne in fakeGrid]
            copie[x][y] = joueur
            meilleur = choisir(
                meilleur,
                minimax(copie, monSymbole, not maximiser, profondeur - 1),
            )
    return meilleur
def myTicTacToe(grille, monSymbole):
    """Choose the best (x, y) move for `monSymbole` via minimax.

    Adjust the `profondeur` argument to trade precision for speed.
    """
    meilleur_score = -float('inf')
    meilleur_coup = (-1, -1)
    for x in range(size):
        for y in range(size):
            if grille[x][y] != 0:
                continue
            # Try the move on a copy; the real board is left untouched.
            essai = [ligne[:] for ligne in grille]
            essai[x][y] = monSymbole
            note = minimax(essai, monSymbole, maximiser=False, profondeur=2)
            if note >= meilleur_score:
                meilleur_score = note
                meilleur_coup = (x, y)
    return meilleur_coup
def check(tab):
    """Scan a 4x4 board for a completed row/column/diagonal/2x2 square.

    Returns (winner, finished): winner is 1 or -1 when that side owns a
    full pattern, else 0; finished is False whenever any empty cell
    remains (original semantics: even a won-but-not-full board reports
    finished=False — callers stop on winner != 0 anyway).

    Fix: the original declared `global sum`, shadowing the builtin for
    the whole module; totals are now plain locals and the builtin sum()
    is used directly.
    """
    motif = 0
    # Rows.
    for i in range(4):
        total = sum(tab[i])
        if math.fabs(total) == 4:
            motif = total
    # Columns.
    for i in range(4):
        total = sum(tab[j][i] for j in range(4))
        if math.fabs(total) == 4:
            motif = total
    # Diagonals.
    total = sum(tab[j][j] for j in range(4))
    if math.fabs(total) == 4:
        motif = total
    total = sum(tab[j][3 - j] for j in range(4))
    if math.fabs(total) == 4:
        motif = total
    # 2x2 squares.
    for i in range(3):
        for j in range(3):
            total = tab[i][j] + tab[i + 1][j] + tab[i][j + 1] + tab[i + 1][j + 1]
            if math.fabs(total) == 4:
                motif = total
    if motif == 4:
        winner, finished = 1, True
    elif motif == -4:
        winner, finished = -1, True
    else:
        winner, finished = 0, True
    # Any remaining empty cell means the game is not over.
    for i in range(4):
        for j in range(4):
            if tab[i][j] == 0:
                finished = False
    return (winner, finished)
def tictactoeRandom(grille, monSymbole):
    """Return a uniformly random empty (x, y) cell of the board.

    Keeps drawing until it hits a cell that is neither 1 nor -1 (i.e.
    empty). Board size is derived from the grid itself instead of the
    module-level `size` global — identical behavior on the 4x4 boards
    used here. `monSymbole` is unused but kept so the signature matches
    the other move functions.
    """
    n = len(grille)
    x = random.randint(0, n - 1)
    y = random.randint(0, n - 1)
    while grille[x][y] in (1, -1):
        x = random.randint(0, n - 1)
        y = random.randint(0, n - 1)
    return (x, y)
def affecterSymbole(grille, monSymbole, x, y):
    """Write `monSymbole` into cell (x, y) of `grille`, in place."""
    grille[x][y] = monSymbole
def affichage(grille):
    """Print the board: one row per line, each value followed by a
    space, then a trailing blank line (exact historical format)."""
    for rangee in grille[:size]:
        print("".join(str(case) + " " for case in rangee[:size]))
    print()
# --- Game setup: build an empty size x size board ---
grille = [0] * size
for i in range(size):
    grille[i] = [0] * size
affichage(grille)
winner = 0
finished = False
# --- Main loop: random "dummy" player (-1) vs minimax "student" (1) ---
while winner == 0 and finished is False:
    monSymbole = -1
    # print(grille)
    (x, y) = tictactoeRandom(grille, monSymbole)
    affecterSymbole(grille, monSymbole, x, y)
    # print(grille)
    print("Dummy player")
    affichage(grille)
    (winner, finished) = check(grille)
    # Student only moves if the dummy hasn't just ended the game.
    if winner == 0 and finished is False:
        monSymbole = 1
        (x, y) = myTicTacToe(grille, monSymbole)
        # (x, y) = tictactoeRandom(grille, monSymbole)
        affecterSymbole(grille, monSymbole, x, y)
        (winner, finished) = check(grille)
        print("Student player")
        affichage(grille)
# Final result: 1 student win, -1 dummy win, 0 draw.
print(winner)
|
import math
import random
size = 4
def compterLignes(grille, symbole):
"""Compte le nombre de fois où le symbole est sur
une ligne qui peut gagner"""
maximum = 0
for ligne in grille:
count = 0
for case in ligne :
if case == symbole :
count += 1
elif case == -symbole :
count = 0
break
maximum = max(count, maximum)
return maximum
def compterColonnes(grille, symbole):
"""Compte le nombre de fois où le symbole est sur
une colonne qui peut gagner"""
maximum = 0
for x in range(size) :
count = 0
for y in range(size) :
if grille[y][x] == symbole :
count += 1
elif grille[y][x] == -symbole :
count = 0
break
maximum = max(maximum, count)
return maximum
def compterDiagonales(grille, symbole) :
"""Compte le nombre de fois où le symbole est sur
une diagonale qui peut gagner"""
diagA = 0
diagB = 0
for d in range(size) :
if grille[d][d] == symbole :
diagA += 1
elif grille[d][d] == -symbole :
diagA = 0
if grille[d][size - d - 1] == symbole :
diagB += 1
elif grille[d][size - d - 1] == -symbole :
diagB = 0
return max(diagA, diagB)
def carre(grille, symbole, position) :
"""Compte le nombre de fois où le symbole est sur
un carré qui peut gagner"""
x1,y1 = position
count = {symbole: 0, -symbole: 0, 0: 0}
for y2 in range(2) :
for x2 in range(2) :
count[grille[y1 + y2][x1+x2]] += 1
if count[-symbole] != 0 :
return 0
else :
return count[symbole]
def compterCarres(grille, symbole) :
"""Applique successivement la fonction square sur
tous les carrés de la grille"""
maximum = 0
for y in range(size - 1) :
for x in range(size - 1) :
maximum = max(maximum, carre(grille, symbole, (x,y)))
return maximum
def gagner(grille, symbole) :
""" Si une des quatre fonctions retourne
size alors le joueur avec "symbole" à
gagné la partie """
return max(
compterColonnes(grille, symbole),
compterLignes(grille, symbole),
compterCarres(grille, symbole),
compterDiagonales(grille, symbole)
) == size
def finPartie(grille) :
"""compte le nombre de zéros pour savoir
si la partie est terminée"""
count = 0
for ligne in grille :
for case in ligne :
if case == 0 :
count += 1
return count == 0
def heuristique(grille, symboleActuel) :
"""
Si on gagne avec le symbole actuel on renvoie
la valeur maximale pour que le minimax
prenne obligatoirement cette solution.
Si c'est l'adversaire qui gagne (-symbole) on retourne
la valeur minimale pour que le minimax ne
prenne pas cette solution.
Sinon on retourne le maximum des fonctions qui calcule le score
pour chaque configuration (ligne, colonne, carré et diagonale)
"""
if gagner(grille, symboleActuel) :
return +float('inf')
elif gagner(grille, -symboleActuel):
return -float('inf')
else :
return max(
compterColonnes(grille, symboleActuel),
compterLignes(grille, symboleActuel),
compterCarres(grille, symboleActuel),
compterDiagonales(grille, symboleActuel)
)
def minimax(fakeGrid, monSymbole, maximiser, profondeur) :
"""Algorithme minimax inspiré de la page wikipédia :
https://fr.wikipedia.org/wiki/Algorithme_minimax"""
score = heuristique(fakeGrid, monSymbole)
if abs(score) == float('inf') :
return score
if profondeur == 0 and not finPartie(fakeGrid) :
return score
if maximiser :
score = -float('inf')
else :
score = +float('inf')
for x in range(0, size) :
for y in range(0, size) :
if fakeGrid[x][y] != 0 :
continue
cpy = [g[:] for g in fakeGrid]
# On modifie la grille pour mettre le bon
# symbole à la case actuelle
if maximiser :
cpy[x][y] = monSymbole
else :
cpy[x][y] = -monSymbole
# On rappelle le minimax comme sur l'algorihtme
pscore = minimax(cpy, monSymbole, not maximiser, profondeur - 1)
if maximiser :
score = max(score, pscore)
else :
score = min(score, pscore)
return score
def myTicTacToe(grille, monSymbole):
"""Utilise l'algorithme minimax pour calculer le meilleur déplacement.
Modifier le parametre "profondeur" pour changer la précision de l'algorithme."""
score = -float('inf')
move = (-1,-1)
for x in range(size) :
for y in range(size) :
if grille[x][y] != 0 :
continue
#g[:] est une copie du tableau
cpy = [g[:] for g in grille]
cpy[x][y] = monSymbole
scoreminimax = minimax(cpy, monSymbole, maximiser=False, profondeur=2)
if scoreminimax >= score :
score = scoreminimax
move = x,y
return move
def check(tab):
global sum
sum = 0
motif = 0
global finished
finished = False
global winner
winner = -1
#check lines
for i in range(0,4):
sum = 0
for j in range(0,4):
sum = sum + tab[i][j]
#print("lines" + str(sum))
if math.fabs(sum) == 4:
motif = sum
#check columns
for i in range(0,4):
sum = 0
for j in range(0,4):
sum = sum + tab[j][i]
#print("columns" + str(sum))
if math.fabs(sum) == 4:
motif = sum
#check diags
sum = 0
for j in range(0,4):
sum = sum + tab[j][j]
if math.fabs(sum) == 4:
motif = sum
sum = 0
for j in range(0,4):
sum = sum + tab[j][3 - j]
if math.fabs(sum) == 4:
motif = sum
#check squares
for i in range(0,3):
for j in range(0,3):
sum = tab[i][j]+tab[i+1][j]+tab[i][j+1]+tab[i+1][j+1]
if math.fabs(sum) == 4:
motif = sum
if motif == 4:
finished = True
winner = 1
elif motif == -4:
finished = True
winner = -1
else :
finished = True
winner = 0
for i in range(0,4):
for j in range(0, 4):
if tab[i][j] == 0:
finished = False
return (winner, finished)
def tictactoeRandom(grille, monSymbole) :
x = random.randint(0,(size - 1))
y = random.randint(0,(size - 1))
#print(grille[x][y])
#while (grille[x][y] == monSymbole or (grille[x][y] + monSymbole) == 0):
while(grille[x][y] == -1 or grille[x][y] == 1):
x = random.randint(0, (size - 1))
y = random.randint(0, (size - 1))
return (x,y)
def affecterSymbole(grille, monSymbole, x, y):
#print(grille)
#print(x, y)
grille[x][y] = monSymbole
#print(grille)
def affichage(grille):
for i in range(0, size):
ch = ""
for j in range(0, size):
ch += str(grille[i][j])+" "
print(ch)
print()
grille = [0] * size
for i in range(size):
grille[i] = [0] * size
affichage(grille)
winner = 0
finished = False
while winner == 0 and finished is False:
monSymbole = -1
# print(grille)
(x, y) = tictactoeRandom(grille, monSymbole)
affecterSymbole(grille, monSymbole, x, y)
# print(grille)
print("Dummy player")
affichage(grille)
(winner, finished) = check(grille)
if winner == 0 and finished is False:
monSymbole = 1
(x, y) = myTicTacToe(grille, monSymbole)
# (x, y) = tictactoeRandom(grille, monSymbole)
affecterSymbole(grille, monSymbole, x, y)
(winner, finished) = check(grille)
print("Student player")
affichage(grille)
print(winner)
|
fr
| 0.980971
|
Compte le nombre de fois où le symbole est sur une ligne qui peut gagner Compte le nombre de fois où le symbole est sur une colonne qui peut gagner Compte le nombre de fois où le symbole est sur une diagonale qui peut gagner Compte le nombre de fois où le symbole est sur un carré qui peut gagner Applique successivement la fonction square sur tous les carrés de la grille Si une des quatre fonctions retourne size alors le joueur avec "symbole" à gagné la partie compte le nombre de zéros pour savoir si la partie est terminée Si on gagne avec le symbole actuel on renvoie la valeur maximale pour que le minimax prenne obligatoirement cette solution. Si c'est l'adversaire qui gagne (-symbole) on retourne la valeur minimale pour que le minimax ne prenne pas cette solution. Sinon on retourne le maximum des fonctions qui calcule le score pour chaque configuration (ligne, colonne, carré et diagonale) Algorithme minimax inspiré de la page wikipédia : https://fr.wikipedia.org/wiki/Algorithme_minimax # On modifie la grille pour mettre le bon # symbole à la case actuelle # On rappelle le minimax comme sur l'algorihtme Utilise l'algorithme minimax pour calculer le meilleur déplacement. Modifier le parametre "profondeur" pour changer la précision de l'algorithme. #g[:] est une copie du tableau #check lines #print("lines" + str(sum)) #check columns #print("columns" + str(sum)) #check diags #check squares #print(grille[x][y]) #while (grille[x][y] == monSymbole or (grille[x][y] + monSymbole) == 0): #print(grille) #print(x, y) #print(grille) # print(grille) # print(grille) # (x, y) = tictactoeRandom(grille, monSymbole)
| 3.426378
| 3
|
4.torch.nn/Containers1.py
|
TwT520Ly/Pytorch-Study
| 0
|
6625557
|
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Model1(nn.Module):
    """Two stacked 5x5 convolutions (1 -> 20 -> 20 channels), ReLU after each."""

    def __init__(self):
        super(Model1, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5)
        self.conv2 = nn.Conv2d(20, 20, 5)

    def forward(self, x):
        hidden = F.relu(self.conv1(x))
        return F.relu(self.conv2(hidden))
class Model2(nn.Module):
    """Registers its submodule via add_module() instead of attribute assignment."""

    def __init__(self):
        super(Model2, self).__init__()
        conv = nn.Conv2d(1, 20, 5)
        self.add_module("conv1", conv)
class Model3(Model1):
    # Extends Model1 with two additional registered convolutions.
    def __init__(self):
        super(Model3, self).__init__()
        # base on model1 and add the "conv3"
        sub_module = nn.Conv2d(20, 20, 5)
        # NOTE(review): the SAME Conv2d instance is registered under both
        # names, so "conv3" and "conv4" share weights — presumably
        # deliberate for this registration demo; confirm if two
        # independent layers were intended.
        self.add_module("conv3", sub_module)
        self.add_module("conv4", sub_module)
# --- Demo: inspect nn.Module registration and traversal APIs ---
model2 = Model2()
print(model2.conv1)
model3 = Model3()
print(model3.conv2)
print(model3.conv3)
# print(model3.children())
print('-----')
# children(): immediate submodules only.
for sub_module in model3.children():
    print(sub_module)
print('-----')
# modules(): recursive walk, including model3 itself.
for sub_module in model3.modules():
    print(sub_module)
print('-----')
# named_children(): (name, module) pairs for direct submodules.
for name, module in model3.named_children():
    if name in ['conv3']:
        print(True)
print('-----')
# named_parameters(): flattened "layer.param" names.
for name, param in model3.named_parameters():
    if name in ['conv3.weight', 'conv3.bias']:
        print(type(param.data), param.size())
# Device / dtype / train-mode toggles, left disabled for the demo:
# model3.cpu()
# model3.cuda(device=1)
# model3.double()
# model3.float()
# model3.half()
## Dropout、BN
# model3.eval()
# model3.train()
|
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class Model1(nn.Module):
def __init__(self):
super(Model1, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5)
self.conv2 = nn.Conv2d(20, 20, 5)
def forward(self, x):
x = F.relu(self.conv1(x))
return F.relu(self.conv2(x))
class Model2(nn.Module):
def __init__(self):
super(Model2, self).__init__()
self.add_module("conv1", nn.Conv2d(1, 20, 5))
class Model3(Model1):
def __init__(self):
super(Model3, self).__init__()
# base on model1 and add the "conv3"
sub_module = nn.Conv2d(20, 20, 5)
self.add_module("conv3", sub_module)
self.add_module("conv4", sub_module)
model2 = Model2()
print(model2.conv1)
model3 = Model3()
print(model3.conv2)
print(model3.conv3)
# print(model3.children())
print('-----')
for sub_module in model3.children():
print(sub_module)
print('-----')
for sub_module in model3.modules():
print(sub_module)
print('-----')
for name, module in model3.named_children():
if name in ['conv3']:
print(True)
print('-----')
for name, param in model3.named_parameters():
if name in ['conv3.weight', 'conv3.bias']:
print(type(param.data), param.size())
# model3.cpu()
# model3.cuda(device=1)
# model3.double()
# model3.float()
# model3.half()
## Dropout、BN
# model3.eval()
# model3.train()
|
es
| 0.289815
|
# base on model1 and add the "conv3" # print(model3.children()) # model3.cpu() # model3.cuda(device=1) # model3.double() # model3.float() # model3.half() ## Dropout、BN # model3.eval() # model3.train()
| 2.972529
| 3
|
scripts/windb_symbols/apply_windbg_symbols.py
|
CrackerCat/idawilli
| 0
|
6625558
|
<reponame>CrackerCat/idawilli<filename>scripts/windb_symbols/apply_windbg_symbols.py
from idaapi import *
from idc import *
# This script prompts for the path to a file
# which contains a three column, whitespace-delimited list
#
# addr deref'd function
# 00bca02c 77dd79c6 ADVAPI32!InitializeSecurityDescriptor
def SetName(ea, s):
    # Thin wrapper over the IDA SDK call; keeps the legacy IDC-style name.
    idaapi.set_name(ea, s)
def is_32():
    """Return True when running in a 32-bit IDA database.

    `__EA64__` only exists in the namespace under 64-bit IDA builds, so
    its absence means 32-bit. Fix: catch NameError specifically instead
    of a bare `except:`, which also swallowed SystemExit and
    KeyboardInterrupt.
    """
    try:
        _ = __EA64__
        return False
    except NameError:
        return True
def make_pointer(ea):
    """Re-type the item at `ea` as a pointer-width integer
    (dword on 32-bit databases, qword on 64-bit)."""
    if is_32():
        MakeUnkn(ea, 4)
        MakeDword(ea)
        return
    MakeUnkn(ea, 8)
    MakeQword(ea)
# --------------------------------------------------------------------------
class MyForm(Form):
    # Minimal IDA Form with a single file-open chooser.
    def __init__(self):
        self.invert = False
        # NOTE(review): the template string below uses IDA's Form DSL and
        # is whitespace/format sensitive — do not reflow it.
        Form.__init__(self, r"""STARTITEM
<#Select an annotation file to open#Browse to open:{iFileOpen}>
""", { 'iFileOpen': Form.FileInput(open=True), })
    def OnFormChange(self, fid):
        # No dynamic behavior; 1 tells IDA the change was accepted.
        return 1
# Drive the form, parse the selected windbg output file, and apply the
# symbol names to the database. (Python 2 syntax — IDAPython era.)
try:
    f = MyForm()
    f.Compile()
    f.iFileOpen.value = ""
    ok = f.Execute()
    if ok == 1:
        print f.iFileOpen.value
        with open(f.iFileOpen.value, "rb") as g:
            for line in g.read().split("\n"):
                # windbg prints 64-bit addresses with backtick separators.
                line = line.replace("`", "")
                parts = line.split(" ")
                # NOTE(review): expects exactly 4 space-separated fields
                # per line even though the header comment says three
                # columns — presumably a double space in windbg output.
                if len(parts) != 4:
                    continue
                try:
                    # First field is the hex address.
                    address = int(parts[0], 0x10)
                except:
                    # Not an address line; skip it.
                    continue
                function = parts[3].strip()
                # "DLL!name" -> keep only the bare symbol name.
                dll, _, name = function.partition("!")
                print "%s %s" % (hex(address), name)
                make_pointer(address)
                SetName(address, name)
    f.Free()
except Exception as e:
    print "Unexpected error: ", e
print "Done."
|
from idaapi import *
from idc import *
# This script prompts for the path to a file
# which contains a three column, whitespace-delimited list
#
# addr deref'd function
# 00bca02c 77dd79c6 ADVAPI32!InitializeSecurityDescriptor
def SetName(ea, s):
idaapi.set_name(ea, s)
def is_32():
try:
_ = __EA64__
return False
except:
return True
def make_pointer(ea):
if is_32():
MakeUnkn(ea, 4)
MakeDword(ea)
else:
MakeUnkn(ea, 8)
MakeQword(ea)
# --------------------------------------------------------------------------
class MyForm(Form):
def __init__(self):
self.invert = False
Form.__init__(self, r"""STARTITEM
<#Select an annotation file to open#Browse to open:{iFileOpen}>
""", { 'iFileOpen': Form.FileInput(open=True), })
def OnFormChange(self, fid):
return 1
try:
f = MyForm()
f.Compile()
f.iFileOpen.value = ""
ok = f.Execute()
if ok == 1:
print f.iFileOpen.value
with open(f.iFileOpen.value, "rb") as g:
for line in g.read().split("\n"):
line = line.replace("`", "")
parts = line.split(" ")
if len(parts) != 4:
continue
try:
address = int(parts[0], 0x10)
except:
continue
function = parts[3].strip()
dll, _, name = function.partition("!")
print "%s %s" % (hex(address), name)
make_pointer(address)
SetName(address, name)
f.Free()
except Exception as e:
print "Unexpected error: ", e
print "Done."
|
en
| 0.482393
|
# This script prompts for the path to a file # which contains a three column, whitespace-delimited list # # addr deref'd function # 00bca02c 77dd79c6 ADVAPI32!InitializeSecurityDescriptor # -------------------------------------------------------------------------- STARTITEM <#Select an annotation file to open#Browse to open:{iFileOpen}>
| 2.536058
| 3
|
test/converter/test_helper.py
|
ideascf/data-packer
| 2
|
6625559
|
<reponame>ideascf/data-packer
# coding=utf-8
from data_packer import converter, err
def test_TypeConverter():
    # TypeConverter(int) should coerce a numeric string to an int.
    cvt = converter.TypeConverter(int)
    converted = cvt.convert('', '', '123')
    assert converted == 123
def test_StrConverter():
    # Python 2 semantics: bare literals are utf-8 byte strings, u'' is
    # unicode. With a utf-8 target everything converts to utf-8 bytes.
    cvt = converter.StrConverter('utf-8')
    assert cvt.convert('', '', 123) == '123'
    assert cvt.convert('', '', u'你好') == '你好'
    assert cvt.convert('', '', '你好') == '你好'
    # GBK target: numbers and unicode are encoded to GBK, while byte
    # strings are passed through unchanged (still utf-8 bytes).
    cvt = converter.StrConverter('GBK')
    assert cvt.convert('', '', 123) == '123'.decode('utf-8').encode('GBK')
    assert cvt.convert('', '', u'你好') == '你好'.decode('utf-8').encode('GBK')
    assert cvt.convert('', '', '你好') == '你好'
def test_UnicodeConverter():
    # UnicodeConverter decodes byte strings using the configured codec
    # and stringifies numbers; unicode input is passed through.
    cvt = converter.UnicodeConverter('utf-8')
    assert cvt.convert('', '', 123) == u'123'
    assert cvt.convert('', '', '你好') == u'你好'
    assert cvt.convert('', '', u'你好') == u'你好'
    # NOTE(review): with a GBK codec the utf-8 source literals are
    # decoded AS GBK, so the expected values are mojibake on purpose.
    cvt = converter.UnicodeConverter('GBK')
    assert cvt.convert('', '', 123) == '123'.decode('GBK')
    assert cvt.convert('', '', '你好') == '你好'.decode('GBK')
    assert cvt.convert('', '', u'你好') == u'你好'
|
# coding=utf-8
from data_packer import converter, err
def test_TypeConverter():
cvt = converter.TypeConverter(int)
assert cvt.convert('', '', '123') == 123
def test_StrConverter():
cvt = converter.StrConverter('utf-8')
assert cvt.convert('', '', 123) == '123'
assert cvt.convert('', '', u'你好') == '你好'
assert cvt.convert('', '', '你好') == '你好'
cvt = converter.StrConverter('GBK')
assert cvt.convert('', '', 123) == '123'.decode('utf-8').encode('GBK')
assert cvt.convert('', '', u'你好') == '你好'.decode('utf-8').encode('GBK')
assert cvt.convert('', '', '你好') == '你好'
def test_UnicodeConverter():
cvt = converter.UnicodeConverter('utf-8')
assert cvt.convert('', '', 123) == u'123'
assert cvt.convert('', '', '你好') == u'你好'
assert cvt.convert('', '', u'你好') == u'你好'
cvt = converter.UnicodeConverter('GBK')
assert cvt.convert('', '', 123) == '123'.decode('GBK')
assert cvt.convert('', '', '你好') == '你好'.decode('GBK')
assert cvt.convert('', '', u'你好') == u'你好'
|
en
| 0.644078
|
# coding=utf-8
| 2.668716
| 3
|
pydlock/__main__.py
|
ErickShepherd/pydlock
| 0
|
6625560
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
A command line utility for the Pydlock package.
Software: Pydlock
Author: <NAME>
E-mail: <EMAIL>
GitHub: https://www.github.com/ErickShepherd/pydlock
PyPI: https://pypi.org/project/pydlock/
Date created: 2020-04-30
Last modified: 2020-04-30
Description:
A command line utility for the Pydlock package, which allows users to lock
and unlock files with a password, or run Python scripts locked by Pydlock.
Usage:
This module may be executed from the command line as a Python script:
python -m pydlock
Running the script without any arguments will display the usage:
usage: pydlock.py [-h] [--arguments ARGUMENTS] [--encoding ENCODING]
{lock,unlock,python,run} file
Supported operations include:
lock: Encrypts a file in-place.
unlock: Decrypts a file in-place.
python: Decrypts and runs the contents of a Python file.
run: Temporarily decrypts, runs, and re-encrypts an arbitrary file.
Example:
python -m pydlock lock example.txt --encoding=utf-8
Copyright:
Pydlock - A Python file encryption tool.
Copyright (c) 2020 of <NAME>, all rights reserved.
License:
This file is part of "Pydlock" (the "Software").
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the right to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
Notes:
Issues with use on Windows executables:
Because the files are modified, locking and unlocking executables on
Windows does not preserve their checksum. Consequently, after locking
and unlocking an executable on Windows, when an execution is attempted,
the system raises an error for security purposes:
"This version of <file> is not compatible with the version of
Windows you're running. Check your computer's system information
and then contact the software publisher."
There does not appear to be a simple resolution for this issue, and the
files effectively become corrupted.
'''
# Standard library imports.
import os
from argparse import ArgumentParser
# Local application imports.
import pydlock
from pydlock.constants import DEFAULT_ENCODING
# Dunder definitions.
__author__ = pydlock.__author__
__version__ = pydlock.__version__
if __name__ == "__main__":
# Maps function names to the respective function.
function_map = {
"lock" : pydlock.lock,
"unlock" : pydlock.unlock,
"python" : pydlock.python,
"run" : pydlock.run
}
# Parses command-line arguments from the user.
parser = ArgumentParser()
parser.add_argument("operation", choices = function_map.keys())
parser.add_argument("file", type = os.path.abspath)
parser.add_argument("--arguments", type = str, default = "")
parser.add_argument("--encoding", type = str, default = DEFAULT_ENCODING)
kwargv = vars(parser.parse_args())
# Aliases parsed command-line arguments for brevity.
task = function_map[kwargv["operation"]]
path = kwargv["file"]
arguments = kwargv["arguments"]
encoding = kwargv["encoding"]
# Performs the indicated task.
task(path, arguments, encoding)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
A command line utility for the Pydlock package.
Software: Pydlock
Author: <NAME>
E-mail: <EMAIL>
GitHub: https://www.github.com/ErickShepherd/pydlock
PyPI: https://pypi.org/project/pydlock/
Date created: 2020-04-30
Last modified: 2020-04-30
Description:
A command line utility for the Pydlock package, which allows users to lock
and unlock files with a password, or run Python scripts locked by Pydlock.
Usage:
This module may be executed from the command line as a Python script:
python -m pydlock
Running the script without any arguments will display the usage:
usage: pydlock.py [-h] [--arguments ARGUMENTS] [--encoding ENCODING]
{lock,unlock,python,run} file
Supported operations include:
lock: Encrypts a file in-place.
unlock: Decrypts a file in-place.
python: Decrypts and runs the contents of a Python file.
run: Temporarily decrypts, runs, and re-encrypts an arbitrary file.
Example:
python -m pydlock lock example.txt --encoding=utf-8
Copyright:
Pydlock - A Python file encryption tool.
Copyright (c) 2020 of <NAME>, all rights reserved.
License:
This file is part of "Pydlock" (the "Software").
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the right to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
Notes:
Issues with use on Windows executables:
Because the files are modified, locking and unlocking executables on
Windows does not preserve their checksum. Consequently, after locking
and unlocking an executable on Windows, when an execution is attempted,
the system raises an error for security purposes:
"This version of <file> is not compatible with the version of
Windows you're running. Check your computer's system information
and then contact the software publisher."
There does not appear to be a simple resolution for this issue, and the
files effectively become corrupted.
'''
# Standard library imports.
import os
from argparse import ArgumentParser

# Local application imports.
import pydlock
from pydlock.constants import DEFAULT_ENCODING

# Dunder definitions.
__author__ = pydlock.__author__
__version__ = pydlock.__version__

if __name__ == "__main__":

    # Dispatch table mapping each sub-command name to its implementation.
    operations = {
        "lock": pydlock.lock,
        "unlock": pydlock.unlock,
        "python": pydlock.python,
        "run": pydlock.run,
    }

    # Build the command-line interface and parse the user's arguments.
    parser = ArgumentParser()
    parser.add_argument("operation", choices=operations.keys())
    parser.add_argument("file", type=os.path.abspath)
    parser.add_argument("--arguments", type=str, default="")
    parser.add_argument("--encoding", type=str, default=DEFAULT_ENCODING)
    args = parser.parse_args()

    # Perform the requested operation on the given file.
    operations[args.operation](args.file, args.arguments, args.encoding)
|
en
| 0.796695
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- A command line utility for the Pydlock package. Software: Pydlock Author: <NAME> E-mail: <EMAIL> GitHub: https://www.github.com/ErickShepherd/pydlock PyPI: https://pypi.org/project/pydlock/ Date created: 2020-04-30 Last modified: 2020-04-30 Description: A command line utility for the Pydlock package, which allows users to lock and unlock files with a password, or run Python scripts locked by Pydlock. Usage: This module may be executed from the command line as a Python script: python -m pydlock Running the script without any arguments will display the usage: usage: pydlock.py [-h] [--arguments ARGUMENTS] [--encoding ENCODING] {lock,unlock,python,run} file Supported operations include: lock: Encrypts a file in-place. unlock: Decrypts a file in-place. python: Decrypts and runs the contents of a Python file. run: Temporarily decrypts, runs, and re-encrypts an arbitrary file. Example: python -m pydlock lock example.txt --encoding=utf-8 Copyright: Pydlock - A Python file encryption tool. Copyright (c) 2020 of <NAME>, all rights reserved. License: This file is part of "Pydlock" (the "Software"). MIT License Copyright (c) 2020 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the right to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Notes: Issues with use on Windows executables: Because the files are modified, locking and unlocking executables on Windows does not preserve their checksum. Consequently, after locking and unlocking an executable on Windows, when an execution is attempted, the system raises an error for security purposes: "This version of <file> is not compatible with the version of Windows you're running. Check your computer's system information and then contact the software publisher." There does not appear to be a simple resolution for this issue, and the files effectively become corrupted. # Standard library imports. # Local application imports. # Dunder definitions. # Maps function names to the respective function. # Parses command-line arguments from the user. # Aliases parsed command-line arguments for brevity. # Performs the indicated task.
| 2.611624
| 3
|
tests/endpoints/test_share_invite_bank_response.py
|
PJUllrich/Universal-Bunq-API-Python-Wrapper
| 0
|
6625561
|
<filename>tests/endpoints/test_share_invite_bank_response.py<gh_stars>0
from apiwrapper.endpoints.share_invite_bank_response import \
ShareInviteBankResponse
from tests.endpoints.test_endpoint import EndpointTest
class ShareInviteBankResponseTest(EndpointTest):
    """Endpoint tests for the share-invite-bank-response API resource."""

    __base_endpoint_url = "/user/%d/share-invite-bank-response"

    @property
    def _base_endpoint(self):
        # Fill the URL template with a fresh random user id.
        return self.__base_endpoint_url % self.random_id

    def setUp(self):
        super().setUp(ShareInviteBankResponse)

    def test_get_base_endpoint(self):
        expected = self._base_endpoint
        actual = self.test_class._get_base_endpoint(self.random_id)
        self.assert_parameters(expected, actual)

    def test_get_all_invite_responses(self):
        expected = self._base_endpoint
        actual = self.test_class.get_all_invite_responses(self.random_id)
        self.assert_parameters(expected, actual)

    def test_get_invite_response_by_id(self):
        expected = "%s/%d" % (self._base_endpoint, self.random_id)
        actual = self.test_class.get_invite_response_by_id(
            self.random_id, self.random_id)
        self.assert_parameters(expected, actual)
|
<filename>tests/endpoints/test_share_invite_bank_response.py<gh_stars>0
from apiwrapper.endpoints.share_invite_bank_response import \
ShareInviteBankResponse
from tests.endpoints.test_endpoint import EndpointTest
class ShareInviteBankResponseTest(EndpointTest):
__base_endpoint_url = "/user/%d/share-invite-bank-response"
@property
def _base_endpoint(self):
return self.__base_endpoint_url % self.random_id
def setUp(self):
super().setUp(ShareInviteBankResponse)
def test_get_base_endpoint(self):
endpoint_should_be = self._base_endpoint
endpoint_to_check = self.test_class._get_base_endpoint(
self.random_id)
self.assert_parameters(endpoint_should_be, endpoint_to_check)
def test_get_all_invite_responses(self):
endpoint_should_be = self._base_endpoint
endpoint_to_check = self.test_class.get_all_invite_responses(
self.random_id)
self.assert_parameters(endpoint_should_be, endpoint_to_check)
def test_get_invite_response_by_id(self):
endpoint_should_be = self._base_endpoint
endpoint_should_be += "/%d" % self.random_id
endpoint_to_check = self.test_class.get_invite_response_by_id(
self.random_id, self.random_id)
self.assert_parameters(endpoint_should_be, endpoint_to_check)
|
none
| 1
| 2.457897
| 2
|
|
back/index.py
|
mkgask/electron-python
| 0
|
6625562
|
<reponame>mkgask/electron-python<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
next = u'次のページへ行くよー'
return render_template('/index.html', message=next)
@app.route('/sample/')
def sample():
return render_template('/sample/sample.html')
if __name__ == "__main__":
app.run(host="127.0.0.1", port=5000)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
next = u'次のページへ行くよー'
return render_template('/index.html', message=next)
@app.route('/sample/')
def sample():
return render_template('/sample/sample.html')
if __name__ == "__main__":
app.run(host="127.0.0.1", port=5000)
|
en
| 0.308914
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*-
| 2.371044
| 2
|
venv/Lib/site-packages/pycparser/ast_transforms.py
|
asanka9/Quession-Discussion-App-Socket.Io-NLP
| 1,738
|
6625563
|
#------------------------------------------------------------------------------
# pycparser: ast_transforms.py
#
# Some utilities used by the parser to create a friendlier AST.
#
# <NAME> [https://eli.thegreenplace.net/]
# License: BSD
#------------------------------------------------------------------------------
from . import c_ast
def fix_switch_cases(switch_node):
    """ The 'case' statements in a 'switch' come out of parsing with one
        child node, so subsequent statements are just tucked to the parent
        Compound. Additionally, consecutive (fall-through) case statements
        come out messy. This is a peculiarity of the C grammar. The following:

            switch (myvar) {
                case 10:
                    k = 10;
                    p = k + 1;
                    return 10;
                case 20:
                case 30:
                    return 20;
                default:
                    break;
            }

        Creates this tree (pseudo-dump):

            Switch
                ID: myvar
                Compound:
                    Case 10:
                        k = 10
                    p = k + 1
                    return 10
                    Case 20:
                        Case 30:
                            return 20
                    Default:
                        break

        The goal of this transform is to fix this mess, turning it into the
        following:

            Switch
                ID: myvar
                Compound:
                    Case 10:
                        k = 10
                        p = k + 1
                        return 10
                    Case 20:
                    Case 30:
                        return 20
                    Default:
                        break

        A fixed AST node is returned. The argument may be modified.
    """
    assert isinstance(switch_node, c_ast.Switch)
    # A switch whose body is a single statement (no braces) needs no fixing.
    if not isinstance(switch_node.stmt, c_ast.Compound):
        return switch_node

    # The new Compound child for the Switch, which will collect children in the
    # correct order
    new_compound = c_ast.Compound([], switch_node.stmt.coord)

    # The last Case/Default node seen; statements after it belong to it.
    last_case = None

    # Goes over the children of the Compound below the Switch, adding them
    # either directly below new_compound or below the last Case as appropriate
    # (for `switch(cond) {}`, block_items would have been None)
    for child in (switch_node.stmt.block_items or []):
        if isinstance(child, (c_ast.Case, c_ast.Default)):
            # If it's a Case/Default:
            # 1. Add it to the Compound and mark as "last case"
            # 2. If its immediate child is also a Case or Default, promote it
            #    to a sibling (fall-through chains parse as nested cases).
            new_compound.block_items.append(child)
            _extract_nested_case(child, new_compound.block_items)
            # The innermost promoted case becomes the new "last case".
            last_case = new_compound.block_items[-1]
        else:
            # Other statements are added as children to the last case, if it
            # exists.
            if last_case is None:
                new_compound.block_items.append(child)
            else:
                last_case.stmts.append(child)

    switch_node.stmt = new_compound
    return switch_node
def _extract_nested_case(case_node, stmts_list):
    """ Extract consecutive Case statements that are made nested by the
        parser and add them to stmts_list.
    """
    # Iterative form of the original recursive walk: while the first
    # statement of the current Case/Default is itself a Case/Default,
    # promote it to a sibling in stmts_list and continue from it.
    node = case_node
    while isinstance(node.stmts[0], (c_ast.Case, c_ast.Default)):
        stmts_list.append(node.stmts.pop())
        node = stmts_list[-1]
|
#------------------------------------------------------------------------------
# pycparser: ast_transforms.py
#
# Some utilities used by the parser to create a friendlier AST.
#
# <NAME> [https://eli.thegreenplace.net/]
# License: BSD
#------------------------------------------------------------------------------
from . import c_ast
def fix_switch_cases(switch_node):
""" The 'case' statements in a 'switch' come out of parsing with one
child node, so subsequent statements are just tucked to the parent
Compound. Additionally, consecutive (fall-through) case statements
come out messy. This is a peculiarity of the C grammar. The following:
switch (myvar) {
case 10:
k = 10;
p = k + 1;
return 10;
case 20:
case 30:
return 20;
default:
break;
}
Creates this tree (pseudo-dump):
Switch
ID: myvar
Compound:
Case 10:
k = 10
p = k + 1
return 10
Case 20:
Case 30:
return 20
Default:
break
The goal of this transform is to fix this mess, turning it into the
following:
Switch
ID: myvar
Compound:
Case 10:
k = 10
p = k + 1
return 10
Case 20:
Case 30:
return 20
Default:
break
A fixed AST node is returned. The argument may be modified.
"""
assert isinstance(switch_node, c_ast.Switch)
if not isinstance(switch_node.stmt, c_ast.Compound):
return switch_node
# The new Compound child for the Switch, which will collect children in the
# correct order
new_compound = c_ast.Compound([], switch_node.stmt.coord)
# The last Case/Default node
last_case = None
# Goes over the children of the Compound below the Switch, adding them
# either directly below new_compound or below the last Case as appropriate
# (for `switch(cond) {}`, block_items would have been None)
for child in (switch_node.stmt.block_items or []):
if isinstance(child, (c_ast.Case, c_ast.Default)):
# If it's a Case/Default:
# 1. Add it to the Compound and mark as "last case"
# 2. If its immediate child is also a Case or Default, promote it
# to a sibling.
new_compound.block_items.append(child)
_extract_nested_case(child, new_compound.block_items)
last_case = new_compound.block_items[-1]
else:
# Other statements are added as children to the last case, if it
# exists.
if last_case is None:
new_compound.block_items.append(child)
else:
last_case.stmts.append(child)
switch_node.stmt = new_compound
return switch_node
def _extract_nested_case(case_node, stmts_list):
""" Recursively extract consecutive Case statements that are made nested
by the parser and add them to the stmts_list.
"""
if isinstance(case_node.stmts[0], (c_ast.Case, c_ast.Default)):
stmts_list.append(case_node.stmts.pop())
_extract_nested_case(stmts_list[-1], stmts_list)
|
en
| 0.796435
|
#------------------------------------------------------------------------------ # pycparser: ast_transforms.py # # Some utilities used by the parser to create a friendlier AST. # # <NAME> [https://eli.thegreenplace.net/] # License: BSD #------------------------------------------------------------------------------ The 'case' statements in a 'switch' come out of parsing with one child node, so subsequent statements are just tucked to the parent Compound. Additionally, consecutive (fall-through) case statements come out messy. This is a peculiarity of the C grammar. The following: switch (myvar) { case 10: k = 10; p = k + 1; return 10; case 20: case 30: return 20; default: break; } Creates this tree (pseudo-dump): Switch ID: myvar Compound: Case 10: k = 10 p = k + 1 return 10 Case 20: Case 30: return 20 Default: break The goal of this transform is to fix this mess, turning it into the following: Switch ID: myvar Compound: Case 10: k = 10 p = k + 1 return 10 Case 20: Case 30: return 20 Default: break A fixed AST node is returned. The argument may be modified. # The new Compound child for the Switch, which will collect children in the # correct order # The last Case/Default node # Goes over the children of the Compound below the Switch, adding them # either directly below new_compound or below the last Case as appropriate # (for `switch(cond) {}`, block_items would have been None) # If it's a Case/Default: # 1. Add it to the Compound and mark as "last case" # 2. If its immediate child is also a Case or Default, promote it # to a sibling. # Other statements are added as children to the last case, if it # exists. Recursively extract consecutive Case statements that are made nested by the parser and add them to the stmts_list.
| 2.894763
| 3
|
day01_test.py
|
Elgolfin/adventofcode-2015-py
| 0
|
6625564
|
"""Day 01 unit tests"""
import unittest
from day01_lib import walk_through_floors
class Day01TestCase(unittest.TestCase):
"""Tests for `day01.py`"""
def test_(self):
"""Tests for the final floor"""
floor, enter_basement_at = walk_through_floors("(()))")
self.assertEqual(floor, -1)
self.assertEqual(enter_basement_at, 5)
if __name__ == '__main__':
unittest.main()
|
"""Day 01 unit tests"""
import unittest
from day01_lib import walk_through_floors
class Day01TestCase(unittest.TestCase):
"""Tests for `day01.py`"""
def test_(self):
"""Tests for the final floor"""
floor, enter_basement_at = walk_through_floors("(()))")
self.assertEqual(floor, -1)
self.assertEqual(enter_basement_at, 5)
if __name__ == '__main__':
unittest.main()
|
en
| 0.81494
|
Day 01 unit tests Tests for `day01.py` Tests for the final floor
| 3.386171
| 3
|
bots/steam/info.py
|
kosyachniy/dev
| 13
|
6625565
|
<gh_stars>10-100
from func.steam import *

# Resolve the "valve" vanity URL via the Steam Web API.
print(api.ISteamUser.ResolveVanityURL(vanityurl="valve", url_type=2))

# Interactive command-line login.
client = SteamClient()
client.cli_login()

# Dump a few details about the logged-in account.
for label, value in (
        ("Logged on as", client.user.name),
        ("Community profile", client.steam_id.community_url),
        ("Last logon", client.user.last_logon),
        ("Last logoff", client.user.last_logoff),
):
    print("%s: %s" % (label, value))
print("Number of friends: %d" % len(client.friends))

client.logout()
|
from func.steam import *
print(api.ISteamUser.ResolveVanityURL(vanityurl="valve", url_type=2))
client = SteamClient()
client.cli_login()
print("Logged on as: %s" % client.user.name)
print("Community profile: %s" % client.steam_id.community_url)
print("Last logon: %s" % client.user.last_logon)
print("Last logoff: %s" % client.user.last_logoff)
print("Number of friends: %d" % len(client.friends))
client.logout()
|
none
| 1
| 2.306624
| 2
|
|
security_monkey/watchers/vpc/dhcp.py
|
boladmin/security_monkey
| 4,258
|
6625566
|
# Copyright 2016 Bridgewater Associates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.vpc.dhcp
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Bridgewater OSS <<EMAIL>>
"""
from security_monkey.decorators import record_exception, iter_account_region
from security_monkey.watcher import Watcher
from security_monkey.watcher import ChangeItem
from security_monkey import app
class DHCP(Watcher):
    """Watcher that enumerates EC2 DHCP Option Sets per account/region and
    records each set as a change-tracked item."""
    index = 'dhcp'
    i_am_singular = 'DHCP Option Set'
    i_am_plural = 'DHCP Option Sets'

    def __init__(self, accounts=None, debug=False):
        super(DHCP, self).__init__(accounts=accounts, debug=debug)

    @record_exception()
    def describe_dhcp_options(self, **kwargs):
        """Call EC2 ``describe_dhcp_options`` for one account/region.

        Expects ``account_name``, ``region`` and ``assumed_role`` in kwargs;
        returns the (possibly empty) list of raw DHCP option set dicts.
        Exceptions are captured into the exception map by @record_exception.
        """
        from security_monkey.common.sts_connect import connect
        conn = connect(kwargs['account_name'], 'boto3.ec2.client', region=kwargs['region'],
                       assumed_role=kwargs['assumed_role'])

        # Rate-limited AWS call to avoid throttling.
        dhcp_option_sets_resp = self.wrap_aws_rate_limited_call(
            conn.describe_dhcp_options)
        dhcp_option_sets = dhcp_option_sets_resp.get('DhcpOptions', [])

        return dhcp_option_sets

    def slurp(self):
        """
        :returns: item_list - list of dhcp option sets.
        :returns: exception_map - A dict where the keys are a tuple containing the
            location of the exception and the value is the actual exception
        """
        self.prep_for_slurp()

        # @iter_account_region fans slurp_items out over every configured
        # account/region pair and merges the results.
        @iter_account_region(index=self.index, accounts=self.accounts, service_name='ec2')
        def slurp_items(**kwargs):
            item_list = []
            exception_map = {}
            kwargs['exception_map'] = exception_map
            app.logger.debug("Checking {}/{}/{}".format(self.index,
                                                        kwargs['account_name'], kwargs['region']))
            dhcp_option_sets = self.describe_dhcp_options(**kwargs)

            if dhcp_option_sets:
                app.logger.debug("Found {} {}".format(
                    len(dhcp_option_sets), self.i_am_plural))

                for dhcpopt in dhcp_option_sets:
                    dhcpopt_id = dhcpopt.get('DhcpOptionsId')

                    # Skip option sets on the watcher's ignore list.
                    if self.check_ignore_list(dhcpopt_id):
                        continue

                    dhcpopt_configurations = dhcpopt.get(
                        'DhcpConfigurations', [])
                    config = {'id': dhcpopt_id}

                    # Flatten single-value options to a scalar; keep
                    # multi-value options as a list of value strings.
                    for option in dhcpopt_configurations:
                        key = option['Key']
                        values = option['Values']
                        if len(values) == 1:
                            config[key] = values[0]['Value']
                        else:
                            config[key] = []
                            for val in values:
                                config[key].append(val['Value'])

                    item = DHCPItem(region=kwargs['region'],
                                    account=kwargs['account_name'],
                                    name=dhcpopt_id, config=config, source_watcher=self)
                    item_list.append(item)
            return item_list, exception_map

        return slurp_items()
class DHCPItem(ChangeItem):
    """Change-tracking item wrapping one DHCP Option Set configuration."""

    def __init__(self, region=None, account=None, name=None, config=None, source_watcher=None):
        # Default the configuration to an empty dict when falsy.
        super(DHCPItem, self).__init__(
            index=DHCP.index,
            region=region,
            account=account,
            name=name,
            new_config=config or {},
            source_watcher=source_watcher)
|
# Copyright 2016 Bridgewater Associates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.vpc.dhcp
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Bridgewater OSS <<EMAIL>>
"""
from security_monkey.decorators import record_exception, iter_account_region
from security_monkey.watcher import Watcher
from security_monkey.watcher import ChangeItem
from security_monkey import app
class DHCP(Watcher):
index = 'dhcp'
i_am_singular = 'DHCP Option Set'
i_am_plural = 'DHCP Option Sets'
def __init__(self, accounts=None, debug=False):
super(DHCP, self).__init__(accounts=accounts, debug=debug)
@record_exception()
def describe_dhcp_options(self, **kwargs):
from security_monkey.common.sts_connect import connect
conn = connect(kwargs['account_name'], 'boto3.ec2.client', region=kwargs['region'],
assumed_role=kwargs['assumed_role'])
dhcp_option_sets_resp = self.wrap_aws_rate_limited_call(
conn.describe_dhcp_options)
dhcp_option_sets = dhcp_option_sets_resp.get('DhcpOptions', [])
return dhcp_option_sets
def slurp(self):
"""
:returns: item_list - list of dhcp option sets.
:returns: exception_map - A dict where the keys are a tuple containing the
location of the exception and the value is the actual exception
"""
self.prep_for_slurp()
@iter_account_region(index=self.index, accounts=self.accounts, service_name='ec2')
def slurp_items(**kwargs):
item_list = []
exception_map = {}
kwargs['exception_map'] = exception_map
app.logger.debug("Checking {}/{}/{}".format(self.index,
kwargs['account_name'], kwargs['region']))
dhcp_option_sets = self.describe_dhcp_options(**kwargs)
if dhcp_option_sets:
app.logger.debug("Found {} {}".format(
len(dhcp_option_sets), self.i_am_plural))
for dhcpopt in dhcp_option_sets:
dhcpopt_id = dhcpopt.get('DhcpOptionsId')
if self.check_ignore_list(dhcpopt_id):
continue
dhcpopt_configurations = dhcpopt.get(
'DhcpConfigurations', [])
config = {'id': dhcpopt_id}
for option in dhcpopt_configurations:
key = option['Key']
values = option['Values']
if len(values) == 1:
config[key] = values[0]['Value']
else:
config[key] = []
for val in values:
config[key].append(val['Value'])
item = DHCPItem(region=kwargs['region'],
account=kwargs['account_name'],
name=dhcpopt_id, config=config, source_watcher=self)
item_list.append(item)
return item_list, exception_map
return slurp_items()
class DHCPItem(ChangeItem):
def __init__(self, region=None, account=None, name=None, config=None, source_watcher=None):
super(DHCPItem, self).__init__(
index=DHCP.index,
region=region,
account=account,
name=name,
new_config=config if config else {},
source_watcher=source_watcher)
|
en
| 0.737738
|
# Copyright 2016 Bridgewater Associates # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. .. module: security_monkey.watchers.vpc.dhcp :platform: Unix .. version:: $$VERSION$$ .. moduleauthor:: Bridgewater OSS <<EMAIL>> :returns: item_list - list of dhcp option sets. :returns: exception_map - A dict where the keys are a tuple containing the location of the exception and the value is the actual exception
| 1.738714
| 2
|
spark-src/recommendation.py
|
bemova/Who-To-Follow-Algorithm-for-Massive-Dataset-Mining
| 0
|
6625567
|
class Recommendation:
    """
    Created by Arezou on 2017-02-12.
    A "who to follow" recommendation: a candidate user id together with the
    number of followed users this user has in common (common_user_count).
    """

    def __init__(self, user_Id, common_user_count):
        """
        :param user_Id: user id as a str
        :param common_user_count: the number of followed users by this
            user in common.
        """
        self.user_Id = user_Id
        self.common_user_count = common_user_count

    def __str__(self):
        """
        :return: string of Recommendation Object in this style: user_Id(common_user_count)
        """
        return str(self.user_Id) + '(' + str(self.common_user_count) + ')'

    def __gt__(self, other):
        """
        Compare two recommendations by their common_user_count.

        :param other: the object to compare against.
        :return: True if this recommendation has a larger common_user_count,
            False otherwise. Returns NotImplemented for non-Recommendation
            operands so Python can try the reflected comparison.
        """
        if not isinstance(other, Recommendation):
            # Bug fix: previously a foreign type raised AttributeError here
            # instead of deferring to Python's comparison protocol.
            return NotImplemented
        return self.common_user_count > other.common_user_count
|
class Recommendation:
"""
Created by Arezou on 2017-02-12.
This Class describe a recommendation, and a recommendation has a
user_Id and a number of followed user by this user in common (common_user_count).
"""
def __init__(self, user_Id, common_user_count):
"""
:param user_Id: user id as a str
:param common_user_count: is the number of followed users by this
user in common.
"""
self.user_Id = user_Id
self.common_user_count = common_user_count
def __str__(self):
"""
:return: string of Recommendation Object in this style: user_Id(common_user_count)
"""
return str(self.user_Id) + '(' + str(self.common_user_count) + ')'
def __gt__(self, other):
"""
:param other: comparing two recommendation objects based on the common_user_count.
:return: True if first recommendation object has more common_user_count than second one.
Otherwise, returns False.
"""
return self.common_user_count > other.common_user_count
|
en
| 0.845462
|
Created by Arezou on 2017-02-12. This Class describe a recommendation, and a recommendation has a user_Id and a number of followed user by this user in common (common_user_count). :param user_Id: user id as a str :param common_user_count: is the number of followed users by this user in common. :return: string of Recommendation Object in this style: user_Id(common_user_count) :param other: comparing two recommendation objects based on the common_user_count. :return: True if first recommendation object has more common_user_count than second one. Otherwise, returns False.
| 3.5669
| 4
|
mesonbuild/cmake/traceparser.py
|
agrexgh/meson
| 0
|
6625568
|
<gh_stars>0
# Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
from .common import CMakeException
from .generator import parse_generator_expressions
from .. import mlog
from ..mesonlib import version_compare
import typing as T
from pathlib import Path
import re
import os
import json
import textwrap
class CMakeTraceLine:
    """One line of CMake trace output: source file, line number,
    lower-cased function name, and the argument list."""

    def __init__(self, file, line, func, args):
        self.file = file
        self.line = line
        # CMake function names are case-insensitive; normalise to lower case.
        self.func = func.lower()
        self.args = args

    def __repr__(self):
        return 'CMake TRACE: %s:%s %s(%s)' % (self.file, self.line, self.func, self.args)
class CMakeTarget:
    """A CMake target reconstructed from the trace, holding its name, type,
    property dict, import status and the declaring trace line."""

    def __init__(self, name, target_type, properties=None, imported: bool = False, tline: T.Optional[CMakeTraceLine] = None):
        # Avoid a shared mutable default for the properties dict.
        if properties is None:
            properties = {}
        self.name = name
        self.type = target_type
        self.properties = properties
        self.imported = imported
        self.tline = tline
        self.depends = []
        self.current_bin_dir = None
        self.current_src_dir = None

    def __repr__(self):
        template = 'CMake TARGET:\n -- name: {}\n -- type: {}\n -- imported: {}\n -- properties: {{\n{} }}\n -- tline: {}'
        prop_str = ''.join(" '{}': {}\n".format(key, self.properties[key])
                           for key in self.properties)
        return template.format(self.name, self.type, self.imported, prop_str, self.tline)

    def strip_properties(self) -> None:
        # Strip whitespace from every property value, updating the existing
        # dict in place so outside references to it stay valid.
        if not self.properties:
            return
        for key in self.properties:
            self.properties[key] = [entry.strip() for entry in self.properties[key]]
class CMakeGeneratorTarget(CMakeTarget):
    """A custom-command target: output files produced by an arbitrary command."""

    def __init__(self, name):
        # Generator targets always have type CUSTOM and start with no properties.
        super().__init__(name, 'CUSTOM', {})
        self.outputs = []        # type: T.List[str]
        self.command = []        # type: T.List[T.List[str]]
        self.working_dir = None  # type: T.Optional[str]
class CMakeTraceParser:
    def __init__(self, cmake_version: str, build_dir: str, permissive: bool = False):
        """Parser for CMake ``--trace`` output.

        :param cmake_version: version of the cmake binary; selects the trace
            format and delivery (file vs stderr).
        :param build_dir: directory the trace file is written into.
        :param permissive: when True, parse errors are logged instead of
            raising CMakeException.
        """
        # Variables seen via set(); every value is stored as a list of strings.
        self.vars = {}  # type: T.Dict[str, T.List[str]]
        # All targets seen so far, keyed by target name.
        self.targets = {}  # type: T.Dict[str, CMakeTarget]

        self.explicit_headers = set()  # type: T.Set[Path]

        # T.List of targets that were added with add_custom_command to generate files
        self.custom_targets = []  # type: T.List[CMakeGeneratorTarget]

        self.permissive = permissive  # type: bool
        self.cmake_version = cmake_version  # type: str
        self.trace_file = 'cmake_trace.txt'
        self.trace_file_path = Path(build_dir) / self.trace_file
        # The json-v1 trace format is only available from CMake 3.17 on.
        self.trace_format = 'json-v1' if version_compare(cmake_version, '>=3.17') else 'human'

        # State for delayed command execution. Delayed command execution is realised
        # with a custom CMake file that overrides some functions and adds some
        # introspection information to the trace.
        self.delayed_commands = []  # type: T.List[str]
        self.stored_commands = []  # type: T.List[CMakeTraceLine]

        # All supported functions
        self.functions = {
            'set': self._cmake_set,
            'unset': self._cmake_unset,
            'add_executable': self._cmake_add_executable,
            'add_library': self._cmake_add_library,
            'add_custom_command': self._cmake_add_custom_command,
            'add_custom_target': self._cmake_add_custom_target,
            'set_property': self._cmake_set_property,
            'set_target_properties': self._cmake_set_target_properties,
            'target_compile_definitions': self._cmake_target_compile_definitions,
            'target_compile_options': self._cmake_target_compile_options,
            'target_include_directories': self._cmake_target_include_directories,
            'target_link_libraries': self._cmake_target_link_libraries,
            'target_link_options': self._cmake_target_link_options,
            'add_dependencies': self._cmake_add_dependencies,

            # Special functions defined in the preload script.
            # These functions do nothing in the CMake code, but have special
            # meaning here in the trace parser.
            'meson_ps_execute_delayed_calls': self._meson_ps_execute_delayed_calls,
            'meson_ps_reload_vars': self._meson_ps_reload_vars,
        }
def trace_args(self) -> T.List[str]:
arg_map = {
'human': ['--trace', '--trace-expand'],
'json-v1': ['--trace-expand', '--trace-format=json-v1'],
}
base_args = ['--no-warn-unused-cli']
if not self.requires_stderr():
base_args += ['--trace-redirect={}'.format(self.trace_file)]
return arg_map[self.trace_format] + base_args
    def requires_stderr(self) -> bool:
        # CMake < 3.16 has no --trace-redirect, so the trace arrives on stderr.
        return version_compare(self.cmake_version, '<3.16')
def parse(self, trace: T.Optional[str] = None) -> None:
# First load the trace (if required)
if not self.requires_stderr():
if not self.trace_file_path.exists and not self.trace_file_path.is_file():
raise CMakeException('CMake: Trace file "{}" not found'.format(str(self.trace_file_path)))
trace = self.trace_file_path.read_text()
if not trace:
raise CMakeException('CMake: The CMake trace was not provided or is empty')
# Second parse the trace
lexer1 = None
if self.trace_format == 'human':
lexer1 = self._lex_trace_human(trace)
elif self.trace_format == 'json-v1':
lexer1 = self._lex_trace_json(trace)
else:
raise CMakeException('CMake: Internal error: Invalid trace format {}. Expected [human, json-v1]'.format(self.trace_format))
# Primary pass -- parse everything
for l in lexer1:
# store the function if its execution should be delayed
if l.func in self.delayed_commands:
self.stored_commands += [l]
continue
# "Execute" the CMake function if supported
fn = self.functions.get(l.func, None)
if(fn):
fn(l)
# Postprocess
for tgt in self.targets.values():
tgt.strip_properties()
def get_first_cmake_var_of(self, var_list: T.List[str]) -> T.List[str]:
# Return the first found CMake variable in list var_list
for i in var_list:
if i in self.vars:
return self.vars[i]
return []
def get_cmake_var(self, var: str) -> T.List[str]:
# Return the value of the CMake variable var or an empty list if var does not exist
if var in self.vars:
return self.vars[var]
return []
def var_to_str(self, var: str) -> T.Optional[str]:
if var in self.vars and self.vars[var]:
return self.vars[var][0]
return None
def _str_to_bool(self, expr: T.Union[str, T.List[str]]) -> bool:
if not expr:
return False
if isinstance(expr, list):
expr_str = expr[0]
else:
expr_str = expr
expr_str = expr_str.upper()
return expr_str not in ['0', 'OFF', 'NO', 'FALSE', 'N', 'IGNORE'] and not expr_str.endswith('NOTFOUND')
    def var_to_bool(self, var: str) -> bool:
        # Evaluate the CMake variable *var* as a boolean (unset counts as False).
        return self._str_to_bool(self.vars.get(var, []))
def _gen_exception(self, function: str, error: str, tline: CMakeTraceLine) -> None:
# Generate an exception if the parser is not in permissive mode
if self.permissive:
mlog.debug('CMake trace warning: {}() {}\n{}'.format(function, error, tline))
return None
raise CMakeException('CMake: {}() {}\n{}'.format(function, error, tline))
    def _cmake_set(self, tline: CMakeTraceLine) -> None:
        """Handler for the CMake set() function in all varieties.

        set() comes in three flavors::

            set(<var> <value> [PARENT_SCOPE])
            set(<var> <value> CACHE <type> <docstring> [FORCE])
            set(ENV{<var>} <value>)

        The ENV variant is not supported and is silently ignored. The other
        two are supported with caveats:

        - scoping is not handled properly, so a set() inside a function
          without PARENT_SCOPE could incorrectly shadow the outer scope
        - the type of CACHE arguments is not honored
        """
        # DOC: https://cmake.org/cmake/help/latest/command/set.html

        # 1st remove PARENT_SCOPE and CACHE from args
        args = []
        for i in tline.args:
            if not i or i == 'PARENT_SCOPE':
                continue

            # Discard everything after the CACHE keyword
            if i == 'CACHE':
                break
            args.append(i)

        if len(args) < 1:
            return self._gen_exception('set', 'requires at least one argument', tline)

        # Now that we've removed extra arguments all that should be left is the
        # variable identifier and the value, join the value back together to
        # ensure spaces in the value are correctly handled. This assumes that
        # variable names don't have spaces. Please don't do that...
        identifier = args.pop(0)
        value = ' '.join(args)

        if not value:
            # Same as unset: an empty value removes the variable
            if identifier in self.vars:
                del self.vars[identifier]
        else:
            # CMake lists are ';' separated strings
            self.vars[identifier] = value.split(';')
def _cmake_unset(self, tline: CMakeTraceLine):
# DOC: https://cmake.org/cmake/help/latest/command/unset.html
if len(tline.args) < 1:
return self._gen_exception('unset', 'requires at least one argument', tline)
if tline.args[0] in self.vars:
del self.vars[tline.args[0]]
def _cmake_add_executable(self, tline: CMakeTraceLine):
# DOC: https://cmake.org/cmake/help/latest/command/add_executable.html
args = list(tline.args) # Make a working copy
# Make sure the exe is imported
if 'IMPORTED' not in args:
return self._gen_exception('add_executable', 'non imported executables are not supported', tline)
args.remove('IMPORTED')
if len(args) < 1:
return self._gen_exception('add_executable', 'requires at least 1 argument', tline)
self.targets[args[0]] = CMakeTarget(args[0], 'EXECUTABLE', {})
    def _cmake_add_library(self, tline: CMakeTraceLine):
        """Handler for the CMake add_library() function.

        Supports INTERFACE, IMPORTED and ALIAS libraries; OBJECT libraries
        are rejected and anything else is stored as a 'NORMAL' target.
        """
        # DOC: https://cmake.org/cmake/help/latest/command/add_library.html
        args = list(tline.args)  # Make a working copy

        # Make sure the lib is imported
        if 'INTERFACE' in args:
            args.remove('INTERFACE')

            if len(args) < 1:
                return self._gen_exception('add_library', 'interface library name not specified', tline)

            self.targets[args[0]] = CMakeTarget(args[0], 'INTERFACE', {}, tline=tline, imported='IMPORTED' in args)
        elif 'IMPORTED' in args:
            args.remove('IMPORTED')

            # Now, only look at the first two arguments (target_name and target_type) and ignore the rest
            if len(args) < 2:
                return self._gen_exception('add_library', 'requires at least 2 arguments', tline)

            self.targets[args[0]] = CMakeTarget(args[0], args[1], {}, tline=tline, imported=True)
        elif 'ALIAS' in args:
            args.remove('ALIAS')

            # Now, only look at the first two arguments (target_name and target_ref) and ignore the rest
            if len(args) < 2:
                return self._gen_exception('add_library', 'requires at least 2 arguments', tline)

            # Simulate the ALIAS with INTERFACE_LINK_LIBRARIES
            self.targets[args[0]] = CMakeTarget(args[0], 'ALIAS', {'INTERFACE_LINK_LIBRARIES': [args[1]]}, tline=tline)
        elif 'OBJECT' in args:
            return self._gen_exception('add_library', 'OBJECT libraries are not supported', tline)
        else:
            self.targets[args[0]] = CMakeTarget(args[0], 'NORMAL', {}, tline=tline)
    def _cmake_add_custom_command(self, tline: CMakeTraceLine, name=None):
        """Handler for the CMake add_custom_command() function.

        Only the OUTPUT signature is supported. The parsed command is stored
        as a CMakeGeneratorTarget in self.custom_targets (and, when *name*
        is given, also registered in self.targets).
        """
        # DOC: https://cmake.org/cmake/help/latest/command/add_custom_command.html
        args = self._flatten_args(list(tline.args))  # Commands can be passed as ';' seperated lists

        if not args:
            return self._gen_exception('add_custom_command', 'requires at least 1 argument', tline)

        # Skip the second function signature
        if args[0] == 'TARGET':
            return self._gen_exception('add_custom_command', 'TARGET syntax is currently not supported', tline)

        magic_keys = ['OUTPUT', 'COMMAND', 'MAIN_DEPENDENCY', 'DEPENDS', 'BYPRODUCTS',
                      'IMPLICIT_DEPENDS', 'WORKING_DIRECTORY', 'COMMENT', 'DEPFILE',
                      'JOB_POOL', 'VERBATIM', 'APPEND', 'USES_TERMINAL', 'COMMAND_EXPAND_LISTS']

        target = CMakeGeneratorTarget(name)

        def handle_output(key: str, target: CMakeGeneratorTarget) -> None:
            # Collect OUTPUT file names.
            target.outputs += [key]

        def handle_command(key: str, target: CMakeGeneratorTarget) -> None:
            # Append to the command currently being collected; 'ARGS' is noise.
            if key == 'ARGS':
                return
            target.command[-1] += [key]

        def handle_depends(key: str, target: CMakeGeneratorTarget) -> None:
            # Collect DEPENDS entries.
            target.depends += [key]

        def handle_working_dir(key: str, target: CMakeGeneratorTarget) -> None:
            # A WORKING_DIRECTORY containing spaces arrives as several args
            # (human trace format); re-join them with single spaces.
            if target.working_dir is None:
                target.working_dir = key
            else:
                target.working_dir += ' '
                target.working_dir += key

        # fn is the handler for the keyword section currently being parsed.
        fn = None

        for i in args:
            if i in magic_keys:
                if i == 'OUTPUT':
                    fn = handle_output
                elif i == 'DEPENDS':
                    fn = handle_depends
                elif i == 'WORKING_DIRECTORY':
                    fn = handle_working_dir
                elif i == 'COMMAND':
                    fn = handle_command
                    # Each COMMAND keyword starts a new command list.
                    target.command += [[]]
                else:
                    fn = None
                continue

            if fn is not None:
                fn(i, target)

        # Directories recorded by the meson preload script (may be None).
        target.current_bin_dir = self.var_to_str('MESON_PS_CMAKE_CURRENT_BINARY_DIR')
        target.current_src_dir = self.var_to_str('MESON_PS_CMAKE_CURRENT_SOURCE_DIR')
        # Re-join file names that were split on spaces (human trace format).
        target.outputs = self._guess_files(target.outputs)
        target.depends = self._guess_files(target.depends)
        target.command = [self._guess_files(x) for x in target.command]

        self.custom_targets += [target]
        if name:
            self.targets[name] = target
    def _cmake_add_custom_target(self, tline: CMakeTraceLine):
        # DOC: https://cmake.org/cmake/help/latest/command/add_custom_target.html
        # Only the first parameter (the target name) is interesting.
        if len(tline.args) < 1:
            return self._gen_exception('add_custom_target', 'requires at least one argument', tline)

        # It's pretty much the same as a custom command
        self._cmake_add_custom_command(tline, tline.args[0])
def _cmake_set_property(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/set_property.html
args = list(tline.args)
scope = args.pop(0)
append = False
targets = []
while args:
curr = args.pop(0)
# XXX: APPEND_STRING is specifically *not* supposed to create a
# list, is treating them as aliases really okay?
if curr == 'APPEND' or curr == 'APPEND_STRING':
append = True
continue
if curr == 'PROPERTY':
break
targets += curr.split(';')
if not args:
return self._gen_exception('set_property', 'faild to parse argument list', tline)
if len(args) == 1:
# Tries to set property to nothing so nothing has to be done
return
identifier = args.pop(0)
if self.trace_format == 'human':
value = ' '.join(args).split(';')
else:
value = [y for x in args for y in x.split(';')]
if not value:
return
def do_target(tgt: str) -> None:
if i not in self.targets:
return self._gen_exception('set_property', 'TARGET {} not found'.format(i), tline)
if identifier not in self.targets[i].properties:
self.targets[i].properties[identifier] = []
if append:
self.targets[i].properties[identifier] += value
else:
self.targets[i].properties[identifier] = value
def do_source(src: str) -> None:
if identifier != 'HEADER_FILE_ONLY' or not self._str_to_bool(value):
return
current_src_dir = self.var_to_str('MESON_PS_CMAKE_CURRENT_SOURCE_DIR')
if not current_src_dir:
mlog.warning(textwrap.dedent('''\
CMake trace: set_property(SOURCE) called before the preload script was loaded.
Unable to determine CMAKE_CURRENT_SOURCE_DIR. This can lead to build errors.
'''))
current_src_dir = '.'
cur_p = Path(current_src_dir)
src_p = Path(src)
if not src_p.is_absolute():
src_p = cur_p / src_p
self.explicit_headers.add(src_p)
if scope == 'TARGET':
for i in targets:
do_target(i)
elif scope == 'SOURCE':
files = self._guess_files(targets)
for i in files:
do_source(i)
    def _cmake_set_target_properties(self, tline: CMakeTraceLine) -> None:
        """Handler for the CMake set_target_properties() function."""
        # DOC: https://cmake.org/cmake/help/latest/command/set_target_properties.html
        args = list(tline.args)

        targets = []
        while args:
            curr = args.pop(0)
            if curr == 'PROPERTIES':
                break

            targets.append(curr)

        # Now we need to try to reconstitute the original quoted format of the
        # arguments, as a property value could have spaces in it. Unlike
        # set_property() this is not context free. There are two approaches I
        # can think of, both have drawbacks:
        #
        #   1. Assume that the property will be capitalized ([A-Z_]), this is
        #      convention but cmake doesn't require it.
        #   2. Maintain a copy of the list here: https://cmake.org/cmake/help/latest/manual/cmake-properties.7.html#target-properties
        #
        # Neither of these is awesome for obvious reasons. I'm going to try
        # option 1 first and fall back to 2, as 1 requires less code and less
        # synchronization for cmake changes.
        #
        # With the JSON output format, introduced in CMake 3.17, spaces are
        # handled properly and we don't have to do either options

        arglist = []  # type: T.List[T.Tuple[str, T.List[str]]]
        if self.trace_format == 'human':
            name = args.pop(0)
            values = []
            prop_regex = re.compile(r'^[A-Z_]+$')
            for a in args:
                if prop_regex.match(a):
                    if values:
                        arglist.append((name, ' '.join(values).split(';')))
                    name = a
                    values = []
                else:
                    values.append(a)
            if values:
                arglist.append((name, ' '.join(values).split(';')))
        else:
            # JSON format: arguments alternate between property name and value.
            arglist = [(x[0], x[1].split(';')) for x in zip(args[::2], args[1::2])]

        for name, value in arglist:
            for i in targets:
                if i not in self.targets:
                    return self._gen_exception('set_target_properties', 'TARGET {} not found'.format(i), tline)

                self.targets[i].properties[name] = value
def _cmake_add_dependencies(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/add_dependencies.html
args = list(tline.args)
if len(args) < 2:
return self._gen_exception('add_dependencies', 'takes at least 2 arguments', tline)
target = self.targets.get(args[0])
if not target:
return self._gen_exception('add_dependencies', 'target not found', tline)
for i in args[1:]:
target.depends += i.split(';')
    def _cmake_target_compile_definitions(self, tline: CMakeTraceLine) -> None:
        # DOC: https://cmake.org/cmake/help/latest/command/target_compile_definitions.html
        # Thin wrapper: stores values in the (INTERFACE_)COMPILE_DEFINITIONS properties.
        self._parse_common_target_options('target_compile_definitions', 'COMPILE_DEFINITIONS', 'INTERFACE_COMPILE_DEFINITIONS', tline)
    def _cmake_target_compile_options(self, tline: CMakeTraceLine) -> None:
        # DOC: https://cmake.org/cmake/help/latest/command/target_compile_options.html
        # Thin wrapper: stores values in the (INTERFACE_)COMPILE_OPTIONS properties.
        self._parse_common_target_options('target_compile_options', 'COMPILE_OPTIONS', 'INTERFACE_COMPILE_OPTIONS', tline)
    def _cmake_target_include_directories(self, tline: CMakeTraceLine) -> None:
        # DOC: https://cmake.org/cmake/help/latest/command/target_include_directories.html
        # Values are paths, so paths=True re-joins names split on spaces;
        # SYSTEM and BEFORE keywords carry no information we use.
        self._parse_common_target_options('target_include_directories', 'INCLUDE_DIRECTORIES', 'INTERFACE_INCLUDE_DIRECTORIES', tline, ignore=['SYSTEM', 'BEFORE'], paths=True)
    def _cmake_target_link_options(self, tline: CMakeTraceLine) -> None:
        # DOC: https://cmake.org/cmake/help/latest/command/target_link_options.html
        # Thin wrapper: stores values in the (INTERFACE_)LINK_OPTIONS properties.
        self._parse_common_target_options('target_link_options', 'LINK_OPTIONS', 'INTERFACE_LINK_OPTIONS', tline)
def _cmake_target_link_libraries(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/target_link_libraries.html
self._parse_common_target_options('target_link_options', 'LINK_LIBRARIES', 'INTERFACE_LINK_LIBRARIES', tline)
    def _parse_common_target_options(self, func: str, private_prop: str, interface_prop: str, tline: CMakeTraceLine, ignore: T.Optional[T.List[str]] = None, paths: bool = False):
        """Shared implementation for the target_*() command handlers.

        :param func:           calling command name (used in error messages)
        :param private_prop:   property receiving PRIVATE/PUBLIC values
        :param interface_prop: property receiving INTERFACE/PUBLIC values
        :param tline:          the trace line being processed
        :param ignore:         keywords to skip (defaults to ['BEFORE'])
        :param paths:          if True, values are paths and names that were
                               split on spaces are re-joined
        """
        if ignore is None:
            ignore = ['BEFORE']

        args = list(tline.args)

        if len(args) < 1:
            return self._gen_exception(func, 'requires at least one argument', tline)

        target = args[0]
        if target not in self.targets:
            return self._gen_exception(func, 'TARGET {} not found'.format(target), tline)

        interface = []
        private = []

        # PUBLIC values go to both the private and the interface property.
        mode = 'PUBLIC'
        for i in args[1:]:
            if i in ignore:
                continue

            if i in ['INTERFACE', 'LINK_INTERFACE_LIBRARIES', 'PUBLIC', 'PRIVATE', 'LINK_PUBLIC', 'LINK_PRIVATE']:
                mode = i
                continue

            if mode in ['INTERFACE', 'LINK_INTERFACE_LIBRARIES', 'PUBLIC', 'LINK_PUBLIC']:
                interface += [i]

            if mode in ['PUBLIC', 'PRIVATE', 'LINK_PRIVATE']:
                private += [i]

        if paths:
            interface = self._guess_files(interface)
            private = self._guess_files(private)

        interface = [x for x in interface if x]
        private = [x for x in private if x]

        for i in [(private_prop, private), (interface_prop, interface)]:
            if not i[0] in self.targets[target].properties:
                self.targets[target].properties[i[0]] = []

            self.targets[target].properties[i[0]] += i[1]
def _meson_ps_execute_delayed_calls(self, tline: CMakeTraceLine) -> None:
for l in self.stored_commands:
fn = self.functions.get(l.func, None)
if(fn):
fn(l)
# clear the stored commands
self.stored_commands = []
    def _meson_ps_reload_vars(self, tline: CMakeTraceLine) -> None:
        # Refresh the list of CMake functions whose execution is delayed until
        # meson_ps_execute_delayed_calls() (maintained by the preload script).
        self.delayed_commands = self.get_cmake_var('MESON_PS_DELAYED_CALLS')
    def _lex_trace_human(self, trace):
        """Generator: parse the human readable trace into CMakeTraceLine objects."""
        # The trace format is: '<file>(<line>):  <func>(<args -- can contain \n> )\n'
        reg_tline = re.compile(r'\s*(.*\.(cmake|txt))\(([0-9]+)\):\s*(\w+)\(([\s\S]*?) ?\)\s*\n', re.MULTILINE)
        reg_other = re.compile(r'[^\n]*\n')
        loc = 0
        while loc < len(trace):
            mo_file_line = reg_tline.match(trace, loc)
            if not mo_file_line:
                # Not a trace line; skip one line of unrelated output.
                skip_match = reg_other.match(trace, loc)
                if not skip_match:
                    print(trace[loc:])
                    raise CMakeException('Failed to parse CMake trace')

                loc = skip_match.end()
                continue

            loc = mo_file_line.end()

            file = mo_file_line.group(1)
            line = mo_file_line.group(3)
            func = mo_file_line.group(4)
            args = mo_file_line.group(5)
            # Resolve generator expressions before splitting the arguments.
            args = parse_generator_expressions(args)
            args = args.split(' ')
            args = list(map(lambda x: x.strip(), args))

            yield CMakeTraceLine(file, line, func, args)
def _lex_trace_json(self, trace: str):
lines = trace.splitlines(keepends=False)
lines.pop(0) # The first line is the version
for i in lines:
data = json.loads(i)
args = data['args']
args = [parse_generator_expressions(x) for x in args]
yield CMakeTraceLine(data['file'], data['line'], data['cmd'], args)
def _flatten_args(self, args: T.List[str]) -> T.List[str]:
# Split lists in arguments
res = [] # type: T.List[str]
for i in args:
res += i.split(';')
return res
    def _guess_files(self, broken_list: T.List[str]) -> T.List[str]:
        """Heuristically re-join file paths that were split on spaces.

        The human readable trace format splits arguments on spaces, which
        breaks paths containing spaces. Consecutive fragments are merged
        back together when the result looks like (or is) a file path.
        """
        # Nothing has to be done for newer formats
        if self.trace_format != 'human':
            return broken_list

        # Try joining file paths that contain spaces
        reg_start = re.compile(r'^([A-Za-z]:)?/(.*/)*[^./]+$')
        reg_end = re.compile(r'^.*\.[a-zA-Z]+$')

        fixed_list = []  # type: T.List[str]
        curr_str = None  # type: T.Optional[str]

        for i in broken_list:
            if curr_str is None:
                curr_str = i
            elif os.path.isfile(curr_str):
                # Abort concatenation if curr_str is an existing file
                fixed_list += [curr_str]
                curr_str = i
            elif not reg_start.match(curr_str):
                # Abort concatenation if curr_str no longer matches the regex
                fixed_list += [curr_str]
                curr_str = i
            elif reg_end.match(i) or os.path.exists('{} {}'.format(curr_str, i)):
                # File detected
                curr_str = '{} {}'.format(curr_str, i)
                fixed_list += [curr_str]
                curr_str = None
            else:
                curr_str = '{} {}'.format(curr_str, i)

        if curr_str:
            fixed_list += [curr_str]
        return fixed_list
|
# Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the parser for CMake's `--trace` output. It reconstructs
# the relevant CMake state (variables, targets, custom commands) from the trace.
from .common import CMakeException
from .generator import parse_generator_expressions
from .. import mlog
from ..mesonlib import version_compare
import typing as T
from pathlib import Path
import re
import os
import json
import textwrap
class CMakeTraceLine:
    """A single parsed entry of the CMake trace output."""

    def __init__(self, file, line, func, args):
        self.file = file
        self.line = line
        # CMake command names are case-insensitive; normalise to lower case.
        self.func = func.lower()
        self.args = args

    def __repr__(self):
        template = 'CMake TRACE: {0}:{1} {2}({3})'
        return template.format(self.file, self.line, self.func, self.args)
class CMakeTarget:
    """A CMake target (library, executable, etc.) reconstructed from the trace."""

    def __init__(self, name, target_type, properties=None, imported: bool = False, tline: T.Optional[CMakeTraceLine] = None):
        # Guard against the shared-mutable-default pitfall.
        self.properties = {} if properties is None else properties
        self.name = name
        self.type = target_type
        self.imported = imported
        self.tline = tline
        self.depends = []
        self.current_bin_dir = None
        self.current_src_dir = None

    def __repr__(self):
        prop_str = ''.join(" '{}': {}\n".format(k, v) for k, v in self.properties.items())
        template = 'CMake TARGET:\n -- name: {}\n -- type: {}\n -- imported: {}\n -- properties: {{\n{} }}\n -- tline: {}'
        return template.format(self.name, self.type, self.imported, prop_str, self.tline)

    def strip_properties(self) -> None:
        """Strip surrounding whitespace from every property value string."""
        if not self.properties:
            return
        for key, val in self.properties.items():
            self.properties[key] = [entry.strip() for entry in val]
class CMakeGeneratorTarget(CMakeTarget):
    # A target created by add_custom_command()/add_custom_target(): instead of
    # building a library or executable it runs commands to generate files.
    def __init__(self, name):
        super().__init__(name, 'CUSTOM', {})
        self.outputs = []  # type: T.List[str]
        self.command = []  # type: T.List[T.List[str]]
        self.working_dir = None  # type: T.Optional[str]
class CMakeTraceParser:
    def __init__(self, cmake_version: str, build_dir: str, permissive: bool = False):
        """Create a trace parser for the given CMake version and build dir.

        :param cmake_version: version of the CMake binary producing the trace
        :param build_dir:     directory where the trace file will be written
        :param permissive:    if True, log parse errors instead of raising
        """
        self.vars = {}  # type: T.Dict[str, T.List[str]]
        self.targets = {}  # type: T.Dict[str, CMakeTarget]

        self.explicit_headers = set()  # type: T.Set[Path]

        # T.List of targets that were added with add_custom_command to generate files
        self.custom_targets = []  # type: T.List[CMakeGeneratorTarget]

        self.permissive = permissive  # type: bool
        self.cmake_version = cmake_version  # type: str
        self.trace_file = 'cmake_trace.txt'
        self.trace_file_path = Path(build_dir) / self.trace_file
        # CMake >= 3.17 supports the machine readable JSON trace format.
        self.trace_format = 'json-v1' if version_compare(cmake_version, '>=3.17') else 'human'

        # State for delayed command execution. Delayed command execution is realised
        # with a custom CMake file that overrides some functions and adds some
        # introspection information to the trace.
        self.delayed_commands = []  # type: T.List[str]
        self.stored_commands = []  # type: T.List[CMakeTraceLine]

        # All supported functions
        self.functions = {
            'set': self._cmake_set,
            'unset': self._cmake_unset,
            'add_executable': self._cmake_add_executable,
            'add_library': self._cmake_add_library,
            'add_custom_command': self._cmake_add_custom_command,
            'add_custom_target': self._cmake_add_custom_target,
            'set_property': self._cmake_set_property,
            'set_target_properties': self._cmake_set_target_properties,
            'target_compile_definitions': self._cmake_target_compile_definitions,
            'target_compile_options': self._cmake_target_compile_options,
            'target_include_directories': self._cmake_target_include_directories,
            'target_link_libraries': self._cmake_target_link_libraries,
            'target_link_options': self._cmake_target_link_options,
            'add_dependencies': self._cmake_add_dependencies,

            # Special functions defined in the preload script.
            # These functions do nothing in the CMake code, but have special
            # meaning here in the trace parser.
            'meson_ps_execute_delayed_calls': self._meson_ps_execute_delayed_calls,
            'meson_ps_reload_vars': self._meson_ps_reload_vars,
        }
def trace_args(self) -> T.List[str]:
arg_map = {
'human': ['--trace', '--trace-expand'],
'json-v1': ['--trace-expand', '--trace-format=json-v1'],
}
base_args = ['--no-warn-unused-cli']
if not self.requires_stderr():
base_args += ['--trace-redirect={}'.format(self.trace_file)]
return arg_map[self.trace_format] + base_args
def requires_stderr(self) -> bool:
return version_compare(self.cmake_version, '<3.16')
def parse(self, trace: T.Optional[str] = None) -> None:
# First load the trace (if required)
if not self.requires_stderr():
if not self.trace_file_path.exists and not self.trace_file_path.is_file():
raise CMakeException('CMake: Trace file "{}" not found'.format(str(self.trace_file_path)))
trace = self.trace_file_path.read_text()
if not trace:
raise CMakeException('CMake: The CMake trace was not provided or is empty')
# Second parse the trace
lexer1 = None
if self.trace_format == 'human':
lexer1 = self._lex_trace_human(trace)
elif self.trace_format == 'json-v1':
lexer1 = self._lex_trace_json(trace)
else:
raise CMakeException('CMake: Internal error: Invalid trace format {}. Expected [human, json-v1]'.format(self.trace_format))
# Primary pass -- parse everything
for l in lexer1:
# store the function if its execution should be delayed
if l.func in self.delayed_commands:
self.stored_commands += [l]
continue
# "Execute" the CMake function if supported
fn = self.functions.get(l.func, None)
if(fn):
fn(l)
# Postprocess
for tgt in self.targets.values():
tgt.strip_properties()
def get_first_cmake_var_of(self, var_list: T.List[str]) -> T.List[str]:
# Return the first found CMake variable in list var_list
for i in var_list:
if i in self.vars:
return self.vars[i]
return []
def get_cmake_var(self, var: str) -> T.List[str]:
# Return the value of the CMake variable var or an empty list if var does not exist
if var in self.vars:
return self.vars[var]
return []
def var_to_str(self, var: str) -> T.Optional[str]:
if var in self.vars and self.vars[var]:
return self.vars[var][0]
return None
def _str_to_bool(self, expr: T.Union[str, T.List[str]]) -> bool:
if not expr:
return False
if isinstance(expr, list):
expr_str = expr[0]
else:
expr_str = expr
expr_str = expr_str.upper()
return expr_str not in ['0', 'OFF', 'NO', 'FALSE', 'N', 'IGNORE'] and not expr_str.endswith('NOTFOUND')
    def var_to_bool(self, var: str) -> bool:
        # Evaluate the CMake variable *var* as a boolean (unset counts as False).
        return self._str_to_bool(self.vars.get(var, []))
def _gen_exception(self, function: str, error: str, tline: CMakeTraceLine) -> None:
# Generate an exception if the parser is not in permissive mode
if self.permissive:
mlog.debug('CMake trace warning: {}() {}\n{}'.format(function, error, tline))
return None
raise CMakeException('CMake: {}() {}\n{}'.format(function, error, tline))
    def _cmake_set(self, tline: CMakeTraceLine) -> None:
        """Handler for the CMake set() function in all varieties.

        set() comes in three flavors::

            set(<var> <value> [PARENT_SCOPE])
            set(<var> <value> CACHE <type> <docstring> [FORCE])
            set(ENV{<var>} <value>)

        The ENV variant is not supported and is silently ignored. The other
        two are supported with caveats:

        - scoping is not handled properly, so a set() inside a function
          without PARENT_SCOPE could incorrectly shadow the outer scope
        - the type of CACHE arguments is not honored
        """
        # DOC: https://cmake.org/cmake/help/latest/command/set.html

        # 1st remove PARENT_SCOPE and CACHE from args
        args = []
        for i in tline.args:
            if not i or i == 'PARENT_SCOPE':
                continue

            # Discard everything after the CACHE keyword
            if i == 'CACHE':
                break
            args.append(i)

        if len(args) < 1:
            return self._gen_exception('set', 'requires at least one argument', tline)

        # Now that we've removed extra arguments all that should be left is the
        # variable identifier and the value, join the value back together to
        # ensure spaces in the value are correctly handled. This assumes that
        # variable names don't have spaces. Please don't do that...
        identifier = args.pop(0)
        value = ' '.join(args)

        if not value:
            # Same as unset: an empty value removes the variable
            if identifier in self.vars:
                del self.vars[identifier]
        else:
            # CMake lists are ';' separated strings
            self.vars[identifier] = value.split(';')
def _cmake_unset(self, tline: CMakeTraceLine):
# DOC: https://cmake.org/cmake/help/latest/command/unset.html
if len(tline.args) < 1:
return self._gen_exception('unset', 'requires at least one argument', tline)
if tline.args[0] in self.vars:
del self.vars[tline.args[0]]
def _cmake_add_executable(self, tline: CMakeTraceLine):
# DOC: https://cmake.org/cmake/help/latest/command/add_executable.html
args = list(tline.args) # Make a working copy
# Make sure the exe is imported
if 'IMPORTED' not in args:
return self._gen_exception('add_executable', 'non imported executables are not supported', tline)
args.remove('IMPORTED')
if len(args) < 1:
return self._gen_exception('add_executable', 'requires at least 1 argument', tline)
self.targets[args[0]] = CMakeTarget(args[0], 'EXECUTABLE', {})
    def _cmake_add_library(self, tline: CMakeTraceLine):
        """Handler for the CMake add_library() function.

        Supports INTERFACE, IMPORTED and ALIAS libraries; OBJECT libraries
        are rejected and anything else is stored as a 'NORMAL' target.
        """
        # DOC: https://cmake.org/cmake/help/latest/command/add_library.html
        args = list(tline.args)  # Make a working copy

        # Make sure the lib is imported
        if 'INTERFACE' in args:
            args.remove('INTERFACE')

            if len(args) < 1:
                return self._gen_exception('add_library', 'interface library name not specified', tline)

            self.targets[args[0]] = CMakeTarget(args[0], 'INTERFACE', {}, tline=tline, imported='IMPORTED' in args)
        elif 'IMPORTED' in args:
            args.remove('IMPORTED')

            # Now, only look at the first two arguments (target_name and target_type) and ignore the rest
            if len(args) < 2:
                return self._gen_exception('add_library', 'requires at least 2 arguments', tline)

            self.targets[args[0]] = CMakeTarget(args[0], args[1], {}, tline=tline, imported=True)
        elif 'ALIAS' in args:
            args.remove('ALIAS')

            # Now, only look at the first two arguments (target_name and target_ref) and ignore the rest
            if len(args) < 2:
                return self._gen_exception('add_library', 'requires at least 2 arguments', tline)

            # Simulate the ALIAS with INTERFACE_LINK_LIBRARIES
            self.targets[args[0]] = CMakeTarget(args[0], 'ALIAS', {'INTERFACE_LINK_LIBRARIES': [args[1]]}, tline=tline)
        elif 'OBJECT' in args:
            return self._gen_exception('add_library', 'OBJECT libraries are not supported', tline)
        else:
            self.targets[args[0]] = CMakeTarget(args[0], 'NORMAL', {}, tline=tline)
    def _cmake_add_custom_command(self, tline: CMakeTraceLine, name=None):
        """Handler for the CMake add_custom_command() function.

        Only the OUTPUT signature is supported. The parsed command is stored
        as a CMakeGeneratorTarget in self.custom_targets (and, when *name*
        is given, also registered in self.targets).
        """
        # DOC: https://cmake.org/cmake/help/latest/command/add_custom_command.html
        args = self._flatten_args(list(tline.args))  # Commands can be passed as ';' seperated lists

        if not args:
            return self._gen_exception('add_custom_command', 'requires at least 1 argument', tline)

        # Skip the second function signature
        if args[0] == 'TARGET':
            return self._gen_exception('add_custom_command', 'TARGET syntax is currently not supported', tline)

        magic_keys = ['OUTPUT', 'COMMAND', 'MAIN_DEPENDENCY', 'DEPENDS', 'BYPRODUCTS',
                      'IMPLICIT_DEPENDS', 'WORKING_DIRECTORY', 'COMMENT', 'DEPFILE',
                      'JOB_POOL', 'VERBATIM', 'APPEND', 'USES_TERMINAL', 'COMMAND_EXPAND_LISTS']

        target = CMakeGeneratorTarget(name)

        def handle_output(key: str, target: CMakeGeneratorTarget) -> None:
            # Collect OUTPUT file names.
            target.outputs += [key]

        def handle_command(key: str, target: CMakeGeneratorTarget) -> None:
            # Append to the command currently being collected; 'ARGS' is noise.
            if key == 'ARGS':
                return
            target.command[-1] += [key]

        def handle_depends(key: str, target: CMakeGeneratorTarget) -> None:
            # Collect DEPENDS entries.
            target.depends += [key]

        def handle_working_dir(key: str, target: CMakeGeneratorTarget) -> None:
            # A WORKING_DIRECTORY containing spaces arrives as several args
            # (human trace format); re-join them with single spaces.
            if target.working_dir is None:
                target.working_dir = key
            else:
                target.working_dir += ' '
                target.working_dir += key

        # fn is the handler for the keyword section currently being parsed.
        fn = None

        for i in args:
            if i in magic_keys:
                if i == 'OUTPUT':
                    fn = handle_output
                elif i == 'DEPENDS':
                    fn = handle_depends
                elif i == 'WORKING_DIRECTORY':
                    fn = handle_working_dir
                elif i == 'COMMAND':
                    fn = handle_command
                    # Each COMMAND keyword starts a new command list.
                    target.command += [[]]
                else:
                    fn = None
                continue

            if fn is not None:
                fn(i, target)

        # Directories recorded by the meson preload script (may be None).
        target.current_bin_dir = self.var_to_str('MESON_PS_CMAKE_CURRENT_BINARY_DIR')
        target.current_src_dir = self.var_to_str('MESON_PS_CMAKE_CURRENT_SOURCE_DIR')
        # Re-join file names that were split on spaces (human trace format).
        target.outputs = self._guess_files(target.outputs)
        target.depends = self._guess_files(target.depends)
        target.command = [self._guess_files(x) for x in target.command]

        self.custom_targets += [target]
        if name:
            self.targets[name] = target
    def _cmake_add_custom_target(self, tline: CMakeTraceLine):
        # DOC: https://cmake.org/cmake/help/latest/command/add_custom_target.html
        # Only the first parameter (the target name) is interesting.
        if len(tline.args) < 1:
            return self._gen_exception('add_custom_target', 'requires at least one argument', tline)

        # It's pretty much the same as a custom command
        self._cmake_add_custom_command(tline, tline.args[0])
def _cmake_set_property(self, tline: CMakeTraceLine) -> None:
# DOC: https://cmake.org/cmake/help/latest/command/set_property.html
args = list(tline.args)
scope = args.pop(0)
append = False
targets = []
while args:
curr = args.pop(0)
# XXX: APPEND_STRING is specifically *not* supposed to create a
# list, is treating them as aliases really okay?
if curr == 'APPEND' or curr == 'APPEND_STRING':
append = True
continue
if curr == 'PROPERTY':
break
targets += curr.split(';')
if not args:
return self._gen_exception('set_property', 'faild to parse argument list', tline)
if len(args) == 1:
# Tries to set property to nothing so nothing has to be done
return
identifier = args.pop(0)
if self.trace_format == 'human':
value = ' '.join(args).split(';')
else:
value = [y for x in args for y in x.split(';')]
if not value:
return
def do_target(tgt: str) -> None:
if i not in self.targets:
return self._gen_exception('set_property', 'TARGET {} not found'.format(i), tline)
if identifier not in self.targets[i].properties:
self.targets[i].properties[identifier] = []
if append:
self.targets[i].properties[identifier] += value
else:
self.targets[i].properties[identifier] = value
def do_source(src: str) -> None:
if identifier != 'HEADER_FILE_ONLY' or not self._str_to_bool(value):
return
current_src_dir = self.var_to_str('MESON_PS_CMAKE_CURRENT_SOURCE_DIR')
if not current_src_dir:
mlog.warning(textwrap.dedent('''\
CMake trace: set_property(SOURCE) called before the preload script was loaded.
Unable to determine CMAKE_CURRENT_SOURCE_DIR. This can lead to build errors.
'''))
current_src_dir = '.'
cur_p = Path(current_src_dir)
src_p = Path(src)
if not src_p.is_absolute():
src_p = cur_p / src_p
self.explicit_headers.add(src_p)
if scope == 'TARGET':
for i in targets:
do_target(i)
elif scope == 'SOURCE':
files = self._guess_files(targets)
for i in files:
do_source(i)
def _cmake_set_target_properties(self, tline: CMakeTraceLine) -> None:
    """Handle set_target_properties().

    DOC: https://cmake.org/cmake/help/latest/command/set_target_properties.html
    """
    args = list(tline.args)

    targets = []
    while args:
        token = args.pop(0)
        if token == 'PROPERTIES':
            break
        targets.append(token)

    # Now we need to try to reconsitute the original quoted format of the
    # arguments, as a property value could have spaces in it. Unlike
    # set_property() this is not context free. There are two approaches I
    # can think of, both have drawbacks:
    #
    # 1. Assume that the property will be capitalized ([A-Z_]), this is
    #    convention but cmake doesn't require it.
    # 2. Maintain a copy of the list here: https://cmake.org/cmake/help/latest/manual/cmake-properties.7.html#target-properties
    #
    # Neither of these is awesome for obvious reasons. I'm going to try
    # option 1 first and fall back to 2, as 1 requires less code and less
    # synchroniztion for cmake changes.
    #
    # With the JSON output format, introduced in CMake 3.17, spaces are
    # handled properly and we don't have to do either options
    arglist = []  # type: T.List[T.Tuple[str, T.List[str]]]
    if self.trace_format == 'human':
        prop_regex = re.compile(r'^[A-Z_]+$')
        name = args.pop(0)
        values = []  # type: T.List[str]
        for token in args:
            if not prop_regex.match(token):
                values.append(token)
                continue
            # A new property name starts: flush the previous one.
            if values:
                arglist.append((name, ' '.join(values).split(';')))
            name = token
            values = []
        if values:
            arglist.append((name, ' '.join(values).split(';')))
    else:
        # JSON format: arguments alternate name, value, name, value, ...
        arglist = [(k, v.split(';')) for k, v in zip(args[::2], args[1::2])]

    for prop_name, prop_value in arglist:
        for tgt in targets:
            if tgt not in self.targets:
                return self._gen_exception('set_target_properties', 'TARGET {} not found'.format(tgt), tline)
            self.targets[tgt].properties[prop_name] = prop_value
def _cmake_add_dependencies(self, tline: CMakeTraceLine) -> None:
    """Handle add_dependencies(): record extra target-level dependencies.

    DOC: https://cmake.org/cmake/help/latest/command/add_dependencies.html
    """
    args = list(tline.args)

    if len(args) < 2:
        return self._gen_exception('add_dependencies', 'takes at least 2 arguments', tline)

    target = self.targets.get(args[0])
    if not target:
        return self._gen_exception('add_dependencies', 'target not found', tline)

    for dep in args[1:]:
        # Dependency arguments may themselves be ';'-separated CMake lists.
        target.depends += dep.split(';')
def _cmake_target_compile_definitions(self, tline: CMakeTraceLine) -> None:
    # DOC: https://cmake.org/cmake/help/latest/command/target_compile_definitions.html
    # Thin wrapper: record the definitions on the target's
    # (INTERFACE_)COMPILE_DEFINITIONS properties via the shared parser.
    self._parse_common_target_options('target_compile_definitions', 'COMPILE_DEFINITIONS', 'INTERFACE_COMPILE_DEFINITIONS', tline)
def _cmake_target_compile_options(self, tline: CMakeTraceLine) -> None:
    # DOC: https://cmake.org/cmake/help/latest/command/target_compile_options.html
    # Thin wrapper: record the options on the target's
    # (INTERFACE_)COMPILE_OPTIONS properties via the shared parser.
    self._parse_common_target_options('target_compile_options', 'COMPILE_OPTIONS', 'INTERFACE_COMPILE_OPTIONS', tline)
def _cmake_target_include_directories(self, tline: CMakeTraceLine) -> None:
    # DOC: https://cmake.org/cmake/help/latest/command/target_include_directories.html
    # SYSTEM and BEFORE only affect compiler flag ordering, which we do not
    # model, so they are ignored; paths=True re-joins space-split paths.
    self._parse_common_target_options('target_include_directories', 'INCLUDE_DIRECTORIES', 'INTERFACE_INCLUDE_DIRECTORIES', tline, ignore=['SYSTEM', 'BEFORE'], paths=True)
def _cmake_target_link_options(self, tline: CMakeTraceLine) -> None:
    # DOC: https://cmake.org/cmake/help/latest/command/target_link_options.html
    # Thin wrapper: record the options on the target's
    # (INTERFACE_)LINK_OPTIONS properties via the shared parser.
    self._parse_common_target_options('target_link_options', 'LINK_OPTIONS', 'INTERFACE_LINK_OPTIONS', tline)
def _cmake_target_link_libraries(self, tline: CMakeTraceLine) -> None:
    # DOC: https://cmake.org/cmake/help/latest/command/target_link_libraries.html
    # FIX: the command label used for error reporting was
    # 'target_link_options' (copy-paste from the handler above); pass the
    # correct command name so generated exceptions mention the right command.
    self._parse_common_target_options('target_link_libraries', 'LINK_LIBRARIES', 'INTERFACE_LINK_LIBRARIES', tline)
def _parse_common_target_options(self, func: str, private_prop: str, interface_prop: str, tline: CMakeTraceLine, ignore: T.Optional[T.List[str]] = None, paths: bool = False):
    """Shared argument parser for the target_*() command family.

    Sorts the arguments of ``tline`` into the private and interface
    property lists of the named target, honouring the PUBLIC / PRIVATE /
    INTERFACE keyword groups.
    """
    if ignore is None:
        ignore = ['BEFORE']

    args = list(tline.args)

    if len(args) < 1:
        return self._gen_exception(func, 'requires at least one argument', tline)

    target = args[0]
    if target not in self.targets:
        return self._gen_exception(func, 'TARGET {} not found'.format(target), tline)

    interface = []
    private = []

    # Keywords switch the current mode; everything else is a value that is
    # assigned to the interface and/or private list depending on the mode.
    mode = 'PUBLIC'
    for arg in args[1:]:
        if arg in ignore:
            continue

        if arg in ['INTERFACE', 'LINK_INTERFACE_LIBRARIES', 'PUBLIC', 'PRIVATE', 'LINK_PUBLIC', 'LINK_PRIVATE']:
            mode = arg
            continue

        if mode in ['INTERFACE', 'LINK_INTERFACE_LIBRARIES', 'PUBLIC', 'LINK_PUBLIC']:
            interface.append(arg)

        if mode in ['PUBLIC', 'PRIVATE', 'LINK_PRIVATE']:
            private.append(arg)

    if paths:
        interface = self._guess_files(interface)
        private = self._guess_files(private)

    interface = [x for x in interface if x]
    private = [x for x in private if x]

    for prop, vals in [(private_prop, private), (interface_prop, interface)]:
        props = self.targets[target].properties
        if prop not in props:
            props[prop] = []
        props[prop] += vals
def _meson_ps_execute_delayed_calls(self, tline: CMakeTraceLine) -> None:
    """Run every trace command that was stored for delayed execution."""
    for cmd in self.stored_commands:
        handler = self.functions.get(cmd.func, None)
        if handler:
            handler(cmd)

    # Clear the queue so the commands are not executed twice.
    self.stored_commands = []
def _meson_ps_reload_vars(self, tline: CMakeTraceLine) -> None:
    # Re-read the list of delayed calls the preload script registered.
    # `tline` is unused; the signature is fixed by the handler dispatch table.
    self.delayed_commands = self.get_cmake_var('MESON_PS_DELAYED_CALLS')
def _lex_trace_human(self, trace):
    """Lex the pre-3.17 human-readable CMake trace format.

    Yields one CMakeTraceLine per parsed trace entry; lines that do not
    look like trace entries are skipped one at a time.
    """
    # The trace format is: '<file>(<line>): <func>(<args -- can contain \n> )\n'
    reg_tline = re.compile(r'\s*(.*\.(cmake|txt))\(([0-9]+)\):\s*(\w+)\(([\s\S]*?) ?\)\s*\n', re.MULTILINE)
    reg_other = re.compile(r'[^\n]*\n')
    loc = 0
    while loc < len(trace):
        mo_file_line = reg_tline.match(trace, loc)
        if not mo_file_line:
            # Not a trace entry -- skip a single line; if even that fails,
            # the remaining text is unparseable.
            skip_match = reg_other.match(trace, loc)
            if not skip_match:
                print(trace[loc:])
                raise CMakeException('Failed to parse CMake trace')

            loc = skip_match.end()
            continue

        loc = mo_file_line.end()

        file = mo_file_line.group(1)
        line = mo_file_line.group(3)
        func = mo_file_line.group(4)
        args = mo_file_line.group(5)
        # Resolve generator expressions first, then split on the spaces the
        # human format uses as separators (paths containing spaces are
        # re-joined later by _guess_files).
        args = parse_generator_expressions(args)
        args = args.split(' ')
        args = list(map(lambda x: x.strip(), args))

        yield CMakeTraceLine(file, line, func, args)
def _lex_trace_json(self, trace: str):
    """Lex the JSON-v1 trace format (one JSON object per line)."""
    lines = trace.splitlines(keepends=False)
    lines.pop(0)  # The first line only carries the format version.
    for raw in lines:
        entry = json.loads(raw)
        resolved = [parse_generator_expressions(x) for x in entry['args']]
        yield CMakeTraceLine(entry['file'], entry['line'], entry['cmd'], resolved)
def _flatten_args(self, args: T.List[str]) -> T.List[str]:
# Split lists in arguments
res = [] # type: T.List[str]
for i in args:
res += i.split(';')
return res
def _guess_files(self, broken_list: T.List[str]) -> T.List[str]:
# Nothing has to be done for newer formats
if self.trace_format != 'human':
return broken_list
# Try joining file paths that contain spaces
reg_start = re.compile(r'^([A-Za-z]:)?/(.*/)*[^./]+$')
reg_end = re.compile(r'^.*\.[a-zA-Z]+$')
fixed_list = [] # type: T.List[str]
curr_str = None # type: T.Optional[str]
for i in broken_list:
if curr_str is None:
curr_str = i
elif os.path.isfile(curr_str):
# Abort concatenation if curr_str is an existing file
fixed_list += [curr_str]
curr_str = i
elif not reg_start.match(curr_str):
# Abort concatenation if curr_str no longer matches the regex
fixed_list += [curr_str]
curr_str = i
elif reg_end.match(i) or os.path.exists('{} {}'.format(curr_str, i)):
# File detected
curr_str = '{} {}'.format(curr_str, i)
fixed_list += [curr_str]
curr_str = None
else:
curr_str = '{} {}'.format(curr_str, i)
if curr_str:
fixed_list += [curr_str]
return fixed_list
|
en
| 0.829856
|
# Copyright 2019 The Meson development team # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This class contains the basic functionality needed to run any interpreter # or an interpreter-based tool. # Strip the strings in the properties # type: T.List[str] # type: T.List[T.List[str]] # type: T.Optional[str] # type: T.Dict[str, T.List[str]] # type: T.Dict[str, CMakeTarget] # type: T.Set[Path] # T.List of targes that were added with add_custom_command to generate files # type: T.List[CMakeGeneratorTarget] # type: bool # type: str # State for delayed command execution. Delayed command execution is realised # with a custom CMake file that overrides some functions and adds some # introspection information to the trace. # type: T.List[str] # type: T.List[CMakeTraceLine] # All supported functions # Special functions defined in the preload script. # These functions do nothing in the CMake code, but have special # meaning here in the trace parser. # First load the trace (if required) # Second parse the trace # Primary pass -- parse everything # store the function if its execution should be delayed # "Execute" the CMake function if supported # Postprocess # Return the first found CMake variable in list var_list # Return the value of the CMake variable var or an empty list if var does not exist # Generate an exception if the parser is not in permissive mode Handler for the CMake set() function in all variaties. 
comes in three flavors: set(<var> <value> [PARENT_SCOPE]) set(<var> <value> CACHE <type> <docstring> [FORCE]) set(ENV{<var>} <value>) We don't support the ENV variant, and any uses of it will be ignored silently. the other two variates are supported, with some caveats: - we don't properly handle scoping, so calls to set() inside a function without PARENT_SCOPE set could incorrectly shadow the outer scope. - We don't honor the type of CACHE arguments # DOC: https://cmake.org/cmake/help/latest/command/set.html # 1st remove PARENT_SCOPE and CACHE from args # Discard everything after the CACHE keyword # Now that we've removed extra arguments all that should be left is the # variable identifier and the value, join the value back together to # ensure spaces in the value are correctly handled. This assumes that # variable names don't have spaces. Please don't do that... # Same as unset # DOC: https://cmake.org/cmake/help/latest/command/unset.html # DOC: https://cmake.org/cmake/help/latest/command/add_executable.html # Make a working copy # Make sure the exe is imported # DOC: https://cmake.org/cmake/help/latest/command/add_library.html # Make a working copy # Make sure the lib is imported # Now, only look at the first two arguments (target_name and target_type) and ignore the rest # Now, only look at the first two arguments (target_name and target_ref) and ignore the rest # Simulate the ALIAS with INTERFACE_LINK_LIBRARIES # DOC: https://cmake.org/cmake/help/latest/command/add_custom_command.html # Commands can be passed as ';' seperated lists # Skip the second function signature # DOC: https://cmake.org/cmake/help/latest/command/add_custom_target.html # We only the first parameter (the target name) is interesting # It's pretty much the same as a custom command # DOC: https://cmake.org/cmake/help/latest/command/set_property.html # XXX: APPEND_STRING is specifically *not* supposed to create a # list, is treating them as aliases really okay? 
# Tries to set property to nothing so nothing has to be done \ CMake trace: set_property(SOURCE) called before the preload script was loaded. Unable to determine CMAKE_CURRENT_SOURCE_DIR. This can lead to build errors. # DOC: https://cmake.org/cmake/help/latest/command/set_target_properties.html # Now we need to try to reconsitute the original quoted format of the # arguments, as a property value could have spaces in it. Unlike # set_property() this is not context free. There are two approaches I # can think of, both have drawbacks: # # 1. Assume that the property will be capitalized ([A-Z_]), this is # convention but cmake doesn't require it. # 2. Maintain a copy of the list here: https://cmake.org/cmake/help/latest/manual/cmake-properties.7.html#target-properties # # Neither of these is awesome for obvious reasons. I'm going to try # option 1 first and fall back to 2, as 1 requires less code and less # synchroniztion for cmake changes. # # With the JSON output format, introduced in CMake 3.17, spaces are # handled properly and we don't have to do either options # type: T.List[T.Tuple[str, T.List[str]]] # DOC: https://cmake.org/cmake/help/latest/command/add_dependencies.html # DOC: https://cmake.org/cmake/help/latest/command/target_compile_definitions.html # DOC: https://cmake.org/cmake/help/latest/command/target_compile_options.html # DOC: https://cmake.org/cmake/help/latest/command/target_include_directories.html # DOC: https://cmake.org/cmake/help/latest/command/target_link_options.html # DOC: https://cmake.org/cmake/help/latest/command/target_link_libraries.html # clear the stored commands # The trace format is: '<file>(<line>): <func>(<args -- can contain \n> )\n' # The first line is the version # Split lists in arguments # type: T.List[str] # Nothing has to be done for newer formats # Try joining file paths that contain spaces # type: T.List[str] # type: T.Optional[str] # Abort concatenation if curr_str is an existing file # Abort concatenation if curr_str 
no longer matches the regex # File detected
| 1.969269
| 2
|
vislice.py
|
IrinaMarijaPolak/Vislice
| 0
|
6625569
|
import bottle
import model

# Secret used to sign the game-id cookie so clients cannot forge it.
SKRIVNOST = 'psssst to je moja skrivnost'
# File the game state is persisted to, and the word list to draw from.
DATOTEKA_S_STANJEM = 'stanje.json'
DATOTEKA_Z_BESEDAMI = 'besede.txt'

# Single shared hangman ("vislice") controller for all requests.
vislice = model.Vislice(DATOTEKA_S_STANJEM, DATOTEKA_Z_BESEDAMI)
@bottle.get('/')
def index():
    """Serve the landing page."""
    page = bottle.template('index.tpl')
    return page
@bottle.post('/nova_igra/')
def nova_igra():
    """Start a new game, remember its id in a signed cookie, redirect to it."""
    new_id = vislice.nova_igra()
    bottle.response.set_cookie('id_igre', new_id, secret=SKRIVNOST, path='/')
    bottle.redirect('/igra/')
@bottle.get('/igra/')
def pokazi_igro():
    """Render the current game identified by the signed cookie."""
    id_igre = bottle.request.get_cookie('id_igre', secret=SKRIVNOST)
    # vislice.igre maps game id -> (game, last-guess outcome); fetch once.
    stanje = vislice.igre[id_igre]
    return bottle.template('igra.tpl', igra=stanje[0], id_igre=id_igre, poskus=stanje[1])
@bottle.post('/igra/')
def ugibaj():
    """Apply the submitted letter guess to the current game and reload."""
    game_id = bottle.request.get_cookie('id_igre', secret=SKRIVNOST)
    guess = bottle.request.forms.getunicode("crka")
    vislice.ugibaj(game_id, guess)
    bottle.redirect('/igra/')
@bottle.get('/img/<picture>')
def serve_picture(picture):
    """Serve gallows images from the img/ directory."""
    return bottle.static_file(picture, root='img')
# Development server: auto-reload on code changes, verbose error pages.
bottle.run(reloader=True, debug=True)
|
import bottle
import model

# Cookie-signing secret, persisted-state file and word-list file.
SKRIVNOST = 'psssst to je moja skrivnost'
DATOTEKA_S_STANJEM = 'stanje.json'
DATOTEKA_Z_BESEDAMI = 'besede.txt'

# Shared hangman ("vislice") controller instance for all requests.
vislice = model.Vislice(DATOTEKA_S_STANJEM, DATOTEKA_Z_BESEDAMI)
@bottle.get('/')
def index():
    """Serve the landing page."""
    return bottle.template('index.tpl')
@bottle.post('/nova_igra/')
def nova_igra():
    """Start a new game and redirect to it, storing its id in a signed cookie."""
    id_igre = vislice.nova_igra()
    bottle.response.set_cookie('id_igre', id_igre, secret=SKRIVNOST, path='/')
    bottle.redirect('/igra/')
@bottle.get('/igra/')
def pokazi_igro():
    """Render the game identified by the signed cookie."""
    id_igre = bottle.request.get_cookie('id_igre', secret=SKRIVNOST)
    # vislice.igre maps game id -> (game, last-guess outcome).
    return bottle.template('igra.tpl',
                           igra=vislice.igre[id_igre][0],
                           id_igre=id_igre,
                           poskus=vislice.igre[id_igre][1])
@bottle.post('/igra/')
def ugibaj():
    """Apply the submitted letter ("crka") to the current game, then reload."""
    id_igre = bottle.request.get_cookie('id_igre', secret=SKRIVNOST)
    crka_za_ugib = bottle.request.forms.getunicode("crka")
    vislice.ugibaj(id_igre, crka_za_ugib)
    bottle.redirect('/igra/')
@bottle.get('/img/<picture>')
def serve_picture(picture):
    """Serve gallows images from the img/ directory."""
    return bottle.static_file(picture, root='img')
# Development server: auto-reload and verbose error pages.
bottle.run(reloader=True, debug=True)
|
none
| 1
| 2.077425
| 2
|
|
Pregunta5.py
|
RodrigoES22/PC4
| 0
|
6625570
|
<gh_stars>0
# Regular-expression exercises: extract the user mentions (User_mentions:9),
# the like count (likes: 5) and the retweet count (number of retweets: 4)
# from a tweet-style string.
import re

s = "Unfortunately one of those moments wasn't a giant squid monster. User_mentions:2, likes: 9, number of retweets: 7"

# Each pattern matches its label followed by a single digit; print the
# matches in the same order as the exercises above.
for pattern in (r"User_mentions:\d", r"likes: \d", r"number of retweets: \d"):
    print(re.findall(pattern, s))
|
# Write a regular expression for each case:
# - all users following the pattern:   User_mentions:9
# - find the number of likes:          likes: 5
# - find the number of retweets:       number of retweets: 4
import re

s = "Unfortunately one of those moments wasn't a giant squid monster. User_mentions:2, likes: 9, number of retweets: 7"

# Each pattern matches its label followed by a single digit.
print(re.findall(r"User_mentions:\d", s))
print(re.findall(r"likes: \d", s))
print(re.findall(r"number of retweets: \d", s))
|
es
| 0.845249
|
#Escriba una expresión regular para cada caso: #todos los usuarios que sigan el siguente patron. User_mentions:9 #encuentre los numero de likes: likes: 5 #que permita encontrar el numero de retweets. number of retweets: 4
| 3.816543
| 4
|
homeassistant/components/xiaomi_aqara/__init__.py
|
learn-home-automation/core
| 1
|
6625571
|
<reponame>learn-home-automation/core<filename>homeassistant/components/xiaomi_aqara/__init__.py
"""Support for Xiaomi Gateways."""
from datetime import timedelta
import logging
import voluptuous as vol
from xiaomi_gateway import XiaomiGateway, XiaomiGatewayDiscovery
from homeassistant import config_entries, core
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_DEVICE_ID,
ATTR_VOLTAGE,
CONF_HOST,
CONF_MAC,
CONF_PORT,
CONF_PROTOCOL,
EVENT_HOMEASSISTANT_STOP,
Platform,
)
from homeassistant.core import ServiceCall, callback
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import format_mac
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
from .const import (
CONF_INTERFACE,
CONF_KEY,
CONF_SID,
DEFAULT_DISCOVERY_RETRY,
DOMAIN,
GATEWAYS_KEY,
LISTENER_KEY,
)
_LOGGER = logging.getLogger(__name__)

# Platforms set up for gateways configured with a developer key (full
# control) vs. without one (read-only sensor platforms only).
GATEWAY_PLATFORMS = [
    Platform.BINARY_SENSOR,
    Platform.COVER,
    Platform.LIGHT,
    Platform.LOCK,
    Platform.SENSOR,
    Platform.SWITCH,
]
GATEWAY_PLATFORMS_NO_KEY = [Platform.BINARY_SENSOR, Platform.SENSOR]

# Service-call attribute names.
ATTR_GW_MAC = "gw_mac"
ATTR_RINGTONE_ID = "ringtone_id"
ATTR_RINGTONE_VOL = "ringtone_vol"

# A device is marked unavailable if it has not pushed an update for this long.
TIME_TILL_UNAVAILABLE = timedelta(minutes=150)

SERVICE_PLAY_RINGTONE = "play_ringtone"
SERVICE_STOP_RINGTONE = "stop_ringtone"
SERVICE_ADD_DEVICE = "add_device"
SERVICE_REMOVE_DEVICE = "remove_device"

# Ringtone id must be an int outside the excluded set; volume is clamped to
# 0-100.  NOTE(review): the excluded ids (9, 14-19) presumably do not exist
# on the gateway -- confirm against the gateway documentation.
SERVICE_SCHEMA_PLAY_RINGTONE = vol.Schema(
    {
        vol.Required(ATTR_RINGTONE_ID): vol.All(
            vol.Coerce(int), vol.NotIn([9, 14, 15, 16, 17, 18, 19])
        ),
        vol.Optional(ATTR_RINGTONE_VOL): vol.All(
            vol.Coerce(int), vol.Clamp(min=0, max=100)
        ),
    }
)

# Sub-device ids ("sid") are validated as 14-character strings.
SERVICE_SCHEMA_REMOVE_DEVICE = vol.Schema(
    {vol.Required(ATTR_DEVICE_ID): vol.All(cv.string, vol.Length(min=14, max=14))}
)
def setup(hass, config):
    """Set up the Xiaomi component.

    Registers the gateway-level services (ringtone control and sub-device
    pairing/removal); the actual gateway connections are created in
    ``async_setup_entry``.
    """

    def play_ringtone_service(call: ServiceCall) -> None:
        """Service to play ringtone through Gateway."""
        ring_id = call.data.get(ATTR_RINGTONE_ID)
        gateway = call.data.get(ATTR_GW_MAC)
        kwargs = {"mid": ring_id}
        # Volume is optional; only forward it when explicitly provided.
        if (ring_vol := call.data.get(ATTR_RINGTONE_VOL)) is not None:
            kwargs["vol"] = ring_vol
        gateway.write_to_hub(gateway.sid, **kwargs)

    def stop_ringtone_service(call: ServiceCall) -> None:
        """Service to stop playing ringtone on Gateway."""
        gateway = call.data.get(ATTR_GW_MAC)
        # mid 10000 -- presumably the gateway's "stop ringtone" id; sent
        # unconditionally by this service.
        gateway.write_to_hub(gateway.sid, mid=10000)

    def add_device_service(call: ServiceCall) -> None:
        """Service to add a new sub-device within the next 30 seconds."""
        gateway = call.data.get(ATTR_GW_MAC)
        gateway.write_to_hub(gateway.sid, join_permission="yes")
        hass.components.persistent_notification.async_create(
            "Join permission enabled for 30 seconds! "
            "Please press the pairing button of the new device once.",
            title="Xiaomi Aqara Gateway",
        )

    def remove_device_service(call: ServiceCall) -> None:
        """Service to remove a sub-device from the gateway."""
        device_id = call.data.get(ATTR_DEVICE_ID)
        gateway = call.data.get(ATTR_GW_MAC)
        gateway.write_to_hub(gateway.sid, remove_device=device_id)

    # Schema that only validates/resolves the target gateway argument.
    gateway_only_schema = _add_gateway_to_schema(hass, vol.Schema({}))

    hass.services.register(
        DOMAIN,
        SERVICE_PLAY_RINGTONE,
        play_ringtone_service,
        schema=_add_gateway_to_schema(hass, SERVICE_SCHEMA_PLAY_RINGTONE),
    )

    hass.services.register(
        DOMAIN, SERVICE_STOP_RINGTONE, stop_ringtone_service, schema=gateway_only_schema
    )

    hass.services.register(
        DOMAIN, SERVICE_ADD_DEVICE, add_device_service, schema=gateway_only_schema
    )

    hass.services.register(
        DOMAIN,
        SERVICE_REMOVE_DEVICE,
        remove_device_service,
        schema=_add_gateway_to_schema(hass, SERVICE_SCHEMA_REMOVE_DEVICE),
    )

    return True
async def async_setup_entry(
    hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
    """Set up the xiaomi aqara components from a config entry."""
    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN].setdefault(GATEWAYS_KEY, {})

    # Connect to Xiaomi Aqara Gateway.  The constructor is run in the
    # executor because it performs blocking I/O.
    xiaomi_gateway = await hass.async_add_executor_job(
        XiaomiGateway,
        entry.data[CONF_HOST],
        entry.data[CONF_SID],
        entry.data[CONF_KEY],
        DEFAULT_DISCOVERY_RETRY,
        entry.data[CONF_INTERFACE],
        entry.data[CONF_PORT],
        entry.data[CONF_PROTOCOL],
    )
    hass.data[DOMAIN][GATEWAYS_KEY][entry.entry_id] = xiaomi_gateway

    # A single discovery listener is shared by all config entries.
    gateway_discovery = hass.data[DOMAIN].setdefault(
        LISTENER_KEY,
        XiaomiGatewayDiscovery(hass.add_job, [], entry.data[CONF_INTERFACE]),
    )

    if len(hass.data[DOMAIN][GATEWAYS_KEY]) == 1:
        # start listening for local pushes (only once, for the first gateway)
        await hass.async_add_executor_job(gateway_discovery.listen)

        # register stop callback to shutdown listening for local pushes
        def stop_xiaomi(event):
            """Stop Xiaomi Socket."""
            _LOGGER.debug("Shutting down Xiaomi Gateway Listener")
            gateway_discovery.stop_listen()

        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_xiaomi)

    gateway_discovery.gateways[entry.data[CONF_HOST]] = xiaomi_gateway
    _LOGGER.debug(
        "Gateway with host '%s' connected, listening for broadcasts",
        entry.data[CONF_HOST],
    )

    device_registry = dr.async_get(hass)
    device_registry.async_get_or_create(
        config_entry_id=entry.entry_id,
        identifiers={(DOMAIN, entry.unique_id)},
        manufacturer="<NAME>",
        name=entry.title,
        sw_version=entry.data[CONF_PROTOCOL],
    )

    # Gateways without a developer key only expose read-only platforms.
    if entry.data[CONF_KEY] is not None:
        platforms = GATEWAY_PLATFORMS
    else:
        platforms = GATEWAY_PLATFORMS_NO_KEY
    hass.config_entries.async_setup_platforms(entry, platforms)

    return True
async def async_unload_entry(
    hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
    """Unload a config entry."""
    # Unload the same platform set that was set up for this entry.
    platforms = (
        GATEWAY_PLATFORMS if entry.data[CONF_KEY] is not None else GATEWAY_PLATFORMS_NO_KEY
    )

    unload_ok = await hass.config_entries.async_unload_platforms(entry, platforms)
    if not unload_ok:
        return unload_ok

    hass.data[DOMAIN][GATEWAYS_KEY].pop(entry.entry_id)
    if not hass.data[DOMAIN][GATEWAYS_KEY]:
        # Last gateway removed -- shut down the shared discovery listener.
        hass.data[DOMAIN].pop(GATEWAYS_KEY)
        _LOGGER.debug("Shutting down Xiaomi Gateway Listener")
        gateway_discovery = hass.data[DOMAIN].pop(LISTENER_KEY)
        await hass.async_add_executor_job(gateway_discovery.stop_listen)

    return unload_ok
class XiaomiDevice(Entity):
    """Representation a base Xiaomi device."""

    def __init__(self, device, device_type, xiaomi_hub, config_entry):
        """Initialize the Xiaomi device.

        device: raw device dict pushed by the gateway; this code reads the
            keys "sid", "model", "proto", "data" and "raw_data".
        device_type: platform-specific type string used in entity naming.
        xiaomi_hub: gateway object providing hub I/O and push callbacks.
        config_entry: the config entry this device belongs to.
        """
        self._state = None
        # Assume available until the unavailability tracker fires.
        self._is_available = True
        self._sid = device["sid"]
        self._model = device["model"]
        self._protocol = device["proto"]
        self._name = f"{device_type}_{self._sid}"
        self._device_name = f"{self._model}_{self._sid}"
        self._type = device_type
        self._write_to_hub = xiaomi_hub.write_to_hub
        self._get_from_hub = xiaomi_hub.get_from_hub
        self._extra_state_attributes = {}
        self._remove_unavailability_tracker = None
        self._xiaomi_hub = xiaomi_hub
        self.parse_data(device["data"], device["raw_data"])
        self.parse_voltage(device["data"])

        # Subclasses may define _data_key; prefer it for the unique id.
        if hasattr(self, "_data_key") and self._data_key:  # pylint: disable=no-member
            self._unique_id = (
                f"{self._data_key}{self._sid}"  # pylint: disable=no-member
            )
        else:
            self._unique_id = f"{self._type}{self._sid}"

        self._gateway_id = config_entry.unique_id
        if config_entry.data[CONF_MAC] == format_mac(self._sid):
            # this entity belongs to the gateway itself
            self._is_gateway = True
            self._device_id = config_entry.unique_id
        else:
            # this entity is connected through zigbee
            self._is_gateway = False
            self._device_id = self._sid

    def _add_push_data_job(self, *args):
        # Hand hub callbacks off to the Home Assistant job queue.
        self.hass.add_job(self.push_data, *args)

    async def async_added_to_hass(self):
        """Start unavailability tracking."""
        self._xiaomi_hub.callbacks[self._sid].append(self._add_push_data_job)
        self._async_track_unavailable()

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        return self._unique_id

    @property
    def device_id(self):
        """Return the device id of the Xiaomi Aqara device."""
        return self._device_id

    @property
    def device_info(self) -> DeviceInfo:
        """Return the device info of the Xiaomi Aqara device."""
        if self._is_gateway:
            device_info = DeviceInfo(
                identifiers={(DOMAIN, self._device_id)},
                model=self._model,
            )
        else:
            device_info = DeviceInfo(
                connections={(dr.CONNECTION_ZIGBEE, self._device_id)},
                identifiers={(DOMAIN, self._device_id)},
                manufacturer="Xiaomi Aqara",
                model=self._model,
                name=self._device_name,
                sw_version=self._protocol,
                via_device=(DOMAIN, self._gateway_id),
            )
        return device_info

    @property
    def available(self):
        """Return True if entity is available."""
        return self._is_available

    @property
    def should_poll(self):
        """Return the polling state. No polling needed."""
        return False

    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        return self._extra_state_attributes

    @callback
    def _async_set_unavailable(self, now):
        """Set state to UNAVAILABLE."""
        self._remove_unavailability_tracker = None
        self._is_available = False
        self.async_write_ha_state()

    @callback
    def _async_track_unavailable(self):
        """(Re)arm the unavailability timer; return True if we came back online."""
        if self._remove_unavailability_tracker:
            self._remove_unavailability_tracker()
        self._remove_unavailability_tracker = async_track_point_in_utc_time(
            self.hass, self._async_set_unavailable, utcnow() + TIME_TILL_UNAVAILABLE
        )
        if not self._is_available:
            self._is_available = True
            return True
        return False

    @callback
    def push_data(self, data, raw_data):
        """Push from Hub."""
        _LOGGER.debug("PUSH >> %s: %s", self, data)
        was_unavailable = self._async_track_unavailable()
        is_data = self.parse_data(data, raw_data)
        is_voltage = self.parse_voltage(data)
        # Only write state when something actually changed.
        if is_data or is_voltage or was_unavailable:
            self.async_write_ha_state()

    def parse_voltage(self, data):
        """Parse battery level data sent by gateway."""
        if "voltage" in data:
            voltage_key = "voltage"
        elif "battery_voltage" in data:
            voltage_key = "battery_voltage"
        else:
            return False

        # Map 2800-3300 mV linearly onto 0-100 % battery, clamping outside
        # the range (presumably the usable range of the coin-cell batteries).
        max_volt = 3300
        min_volt = 2800
        voltage = data[voltage_key]
        self._extra_state_attributes[ATTR_VOLTAGE] = round(voltage / 1000.0, 2)
        voltage = min(voltage, max_volt)
        voltage = max(voltage, min_volt)
        percent = ((voltage - min_volt) / (max_volt - min_volt)) * 100
        self._extra_state_attributes[ATTR_BATTERY_LEVEL] = round(percent, 1)
        return True

    def parse_data(self, data, raw_data):
        """Parse data sent by gateway."""
        raise NotImplementedError()
def _add_gateway_to_schema(hass, schema):
    """Extend a voluptuous schema with a gateway validator."""

    def gateway(sid):
        """Convert sid to a gateway."""
        wanted = str(sid).replace(":", "").lower()
        for gw in hass.data[DOMAIN][GATEWAYS_KEY].values():
            if gw.sid == wanted:
                return gw
        raise vol.Invalid(f"Unknown gateway sid {wanted}")

    kwargs = {}
    xiaomi_data = hass.data.get(DOMAIN)
    if xiaomi_data is not None:
        gateways = list(xiaomi_data[GATEWAYS_KEY].values())
        # If the user has only 1 gateway, make it the default for services.
        if len(gateways) == 1:
            kwargs["default"] = gateways[0].sid

    return schema.extend({vol.Required(ATTR_GW_MAC, **kwargs): gateway})
|
"""Support for Xiaomi Gateways."""
from datetime import timedelta
import logging
import voluptuous as vol
from xiaomi_gateway import XiaomiGateway, XiaomiGatewayDiscovery
from homeassistant import config_entries, core
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_DEVICE_ID,
ATTR_VOLTAGE,
CONF_HOST,
CONF_MAC,
CONF_PORT,
CONF_PROTOCOL,
EVENT_HOMEASSISTANT_STOP,
Platform,
)
from homeassistant.core import ServiceCall, callback
from homeassistant.helpers import device_registry as dr
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import format_mac
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
from .const import (
CONF_INTERFACE,
CONF_KEY,
CONF_SID,
DEFAULT_DISCOVERY_RETRY,
DOMAIN,
GATEWAYS_KEY,
LISTENER_KEY,
)
_LOGGER = logging.getLogger(__name__)

# Platforms for gateways with a developer key vs. read-only gateways.
GATEWAY_PLATFORMS = [
    Platform.BINARY_SENSOR,
    Platform.COVER,
    Platform.LIGHT,
    Platform.LOCK,
    Platform.SENSOR,
    Platform.SWITCH,
]
GATEWAY_PLATFORMS_NO_KEY = [Platform.BINARY_SENSOR, Platform.SENSOR]

# Service-call attribute names.
ATTR_GW_MAC = "gw_mac"
ATTR_RINGTONE_ID = "ringtone_id"
ATTR_RINGTONE_VOL = "ringtone_vol"

# A device is marked unavailable after this long without a push update.
TIME_TILL_UNAVAILABLE = timedelta(minutes=150)

SERVICE_PLAY_RINGTONE = "play_ringtone"
SERVICE_STOP_RINGTONE = "stop_ringtone"
SERVICE_ADD_DEVICE = "add_device"
SERVICE_REMOVE_DEVICE = "remove_device"

# Ringtone id must be an int outside the excluded set; volume clamps to 0-100.
SERVICE_SCHEMA_PLAY_RINGTONE = vol.Schema(
    {
        vol.Required(ATTR_RINGTONE_ID): vol.All(
            vol.Coerce(int), vol.NotIn([9, 14, 15, 16, 17, 18, 19])
        ),
        vol.Optional(ATTR_RINGTONE_VOL): vol.All(
            vol.Coerce(int), vol.Clamp(min=0, max=100)
        ),
    }
)

# Sub-device ids ("sid") are validated as 14-character strings.
SERVICE_SCHEMA_REMOVE_DEVICE = vol.Schema(
    {vol.Required(ATTR_DEVICE_ID): vol.All(cv.string, vol.Length(min=14, max=14))}
)
def setup(hass, config):
    """Set up the Xiaomi component.

    Registers the gateway-level services; gateway connections themselves
    are created in ``async_setup_entry``.
    """

    def play_ringtone_service(call: ServiceCall) -> None:
        """Service to play ringtone through Gateway."""
        ring_id = call.data.get(ATTR_RINGTONE_ID)
        gateway = call.data.get(ATTR_GW_MAC)
        kwargs = {"mid": ring_id}
        # Volume is optional; only forward it when explicitly provided.
        if (ring_vol := call.data.get(ATTR_RINGTONE_VOL)) is not None:
            kwargs["vol"] = ring_vol
        gateway.write_to_hub(gateway.sid, **kwargs)

    def stop_ringtone_service(call: ServiceCall) -> None:
        """Service to stop playing ringtone on Gateway."""
        gateway = call.data.get(ATTR_GW_MAC)
        gateway.write_to_hub(gateway.sid, mid=10000)

    def add_device_service(call: ServiceCall) -> None:
        """Service to add a new sub-device within the next 30 seconds."""
        gateway = call.data.get(ATTR_GW_MAC)
        gateway.write_to_hub(gateway.sid, join_permission="yes")
        hass.components.persistent_notification.async_create(
            "Join permission enabled for 30 seconds! "
            "Please press the pairing button of the new device once.",
            title="Xiaomi Aqara Gateway",
        )

    def remove_device_service(call: ServiceCall) -> None:
        """Service to remove a sub-device from the gateway."""
        device_id = call.data.get(ATTR_DEVICE_ID)
        gateway = call.data.get(ATTR_GW_MAC)
        gateway.write_to_hub(gateway.sid, remove_device=device_id)

    # Schema that only validates/resolves the target gateway argument.
    gateway_only_schema = _add_gateway_to_schema(hass, vol.Schema({}))

    hass.services.register(
        DOMAIN,
        SERVICE_PLAY_RINGTONE,
        play_ringtone_service,
        schema=_add_gateway_to_schema(hass, SERVICE_SCHEMA_PLAY_RINGTONE),
    )

    hass.services.register(
        DOMAIN, SERVICE_STOP_RINGTONE, stop_ringtone_service, schema=gateway_only_schema
    )

    hass.services.register(
        DOMAIN, SERVICE_ADD_DEVICE, add_device_service, schema=gateway_only_schema
    )

    hass.services.register(
        DOMAIN,
        SERVICE_REMOVE_DEVICE,
        remove_device_service,
        schema=_add_gateway_to_schema(hass, SERVICE_SCHEMA_REMOVE_DEVICE),
    )

    return True
async def async_setup_entry(
    hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
    """Set up the xiaomi aqara components from a config entry."""
    hass.data.setdefault(DOMAIN, {})
    hass.data[DOMAIN].setdefault(GATEWAYS_KEY, {})

    # Connect to Xiaomi Aqara Gateway (blocking constructor -> executor).
    xiaomi_gateway = await hass.async_add_executor_job(
        XiaomiGateway,
        entry.data[CONF_HOST],
        entry.data[CONF_SID],
        entry.data[CONF_KEY],
        DEFAULT_DISCOVERY_RETRY,
        entry.data[CONF_INTERFACE],
        entry.data[CONF_PORT],
        entry.data[CONF_PROTOCOL],
    )
    hass.data[DOMAIN][GATEWAYS_KEY][entry.entry_id] = xiaomi_gateway

    # A single discovery listener is shared by all config entries.
    gateway_discovery = hass.data[DOMAIN].setdefault(
        LISTENER_KEY,
        XiaomiGatewayDiscovery(hass.add_job, [], entry.data[CONF_INTERFACE]),
    )

    if len(hass.data[DOMAIN][GATEWAYS_KEY]) == 1:
        # start listening for local pushes (only once, for the first gateway)
        await hass.async_add_executor_job(gateway_discovery.listen)

        # register stop callback to shutdown listening for local pushes
        def stop_xiaomi(event):
            """Stop Xiaomi Socket."""
            _LOGGER.debug("Shutting down Xiaomi Gateway Listener")
            gateway_discovery.stop_listen()

        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_xiaomi)

    gateway_discovery.gateways[entry.data[CONF_HOST]] = xiaomi_gateway
    _LOGGER.debug(
        "Gateway with host '%s' connected, listening for broadcasts",
        entry.data[CONF_HOST],
    )

    device_registry = dr.async_get(hass)
    device_registry.async_get_or_create(
        config_entry_id=entry.entry_id,
        identifiers={(DOMAIN, entry.unique_id)},
        manufacturer="<NAME>",
        name=entry.title,
        sw_version=entry.data[CONF_PROTOCOL],
    )

    # Gateways without a developer key only expose read-only platforms.
    if entry.data[CONF_KEY] is not None:
        platforms = GATEWAY_PLATFORMS
    else:
        platforms = GATEWAY_PLATFORMS_NO_KEY
    hass.config_entries.async_setup_platforms(entry, platforms)

    return True
async def async_unload_entry(
    hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
    """Unload a gateway config entry and release shared resources."""
    # The platform set must mirror the one chosen during setup.
    platforms = (
        GATEWAY_PLATFORMS
        if entry.data[CONF_KEY] is not None
        else GATEWAY_PLATFORMS_NO_KEY
    )
    unload_ok = await hass.config_entries.async_unload_platforms(entry, platforms)
    if not unload_ok:
        return unload_ok
    hass.data[DOMAIN][GATEWAYS_KEY].pop(entry.entry_id)
    if not hass.data[DOMAIN][GATEWAYS_KEY]:
        # Last gateway removed: stop the shared discovery listener.
        hass.data[DOMAIN].pop(GATEWAYS_KEY)
        _LOGGER.debug("Shutting down Xiaomi Gateway Listener")
        gateway_discovery = hass.data[DOMAIN].pop(LISTENER_KEY)
        await hass.async_add_executor_job(gateway_discovery.stop_listen)
    return unload_ok
class XiaomiDevice(Entity):
    """Representation of a base Xiaomi device.

    Concrete platforms subclass this and implement :meth:`parse_data`.
    State updates arrive as pushes from the gateway hub (no polling);
    an unavailability timer marks the entity unavailable when no push
    arrives within TIME_TILL_UNAVAILABLE.
    """
    def __init__(self, device, device_type, xiaomi_hub, config_entry):
        """Initialize the Xiaomi device.

        Args:
            device: Raw device dict from the hub; keys used here are
                "sid", "model", "proto", "data" and "raw_data".
            device_type: Platform-specific type label used in the name.
            xiaomi_hub: Gateway object providing hub I/O and callbacks.
            config_entry: Config entry of the owning gateway.
        """
        self._state = None
        self._is_available = True
        self._sid = device["sid"]
        self._model = device["model"]
        self._protocol = device["proto"]
        self._name = f"{device_type}_{self._sid}"
        self._device_name = f"{self._model}_{self._sid}"
        self._type = device_type
        self._write_to_hub = xiaomi_hub.write_to_hub
        self._get_from_hub = xiaomi_hub.get_from_hub
        self._extra_state_attributes = {}
        self._remove_unavailability_tracker = None
        self._xiaomi_hub = xiaomi_hub
        self.parse_data(device["data"], device["raw_data"])
        self.parse_voltage(device["data"])
        # Subclasses may define _data_key; when present it forms the
        # unique ID, otherwise the device type is used instead.
        if hasattr(self, "_data_key") and self._data_key: # pylint: disable=no-member
            self._unique_id = (
                f"{self._data_key}{self._sid}" # pylint: disable=no-member
            )
        else:
            self._unique_id = f"{self._type}{self._sid}"
        self._gateway_id = config_entry.unique_id
        if config_entry.data[CONF_MAC] == format_mac(self._sid):
            # this entity belongs to the gateway itself
            self._is_gateway = True
            self._device_id = config_entry.unique_id
        else:
            # this entity is connected through zigbee
            self._is_gateway = False
            self._device_id = self._sid
    def _add_push_data_job(self, *args):
        """Schedule push_data on the event loop (hub callbacks arrive
        outside the loop, so hand off via hass.add_job)."""
        self.hass.add_job(self.push_data, *args)
    async def async_added_to_hass(self):
        """Start unavailability tracking."""
        self._xiaomi_hub.callbacks[self._sid].append(self._add_push_data_job)
        self._async_track_unavailable()
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        return self._unique_id
    @property
    def device_id(self):
        """Return the device id of the Xiaomi Aqara device."""
        return self._device_id
    @property
    def device_info(self) -> DeviceInfo:
        """Return the device info of the Xiaomi Aqara device."""
        if self._is_gateway:
            # The gateway's own registry entry already exists; only add
            # identifying info here.
            device_info = DeviceInfo(
                identifiers={(DOMAIN, self._device_id)},
                model=self._model,
            )
        else:
            device_info = DeviceInfo(
                connections={(dr.CONNECTION_ZIGBEE, self._device_id)},
                identifiers={(DOMAIN, self._device_id)},
                manufacturer="Xiaomi Aqara",
                model=self._model,
                name=self._device_name,
                sw_version=self._protocol,
                via_device=(DOMAIN, self._gateway_id),
            )
        return device_info
    @property
    def available(self):
        """Return True if entity is available."""
        return self._is_available
    @property
    def should_poll(self):
        """Return the polling state. No polling needed."""
        return False
    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        return self._extra_state_attributes
    @callback
    def _async_set_unavailable(self, now):
        """Set state to UNAVAILABLE."""
        self._remove_unavailability_tracker = None
        self._is_available = False
        self.async_write_ha_state()
    @callback
    def _async_track_unavailable(self):
        """(Re)arm the unavailability timer.

        Returns True if the entity transitioned from unavailable back
        to available, False otherwise.
        """
        if self._remove_unavailability_tracker:
            self._remove_unavailability_tracker()
        self._remove_unavailability_tracker = async_track_point_in_utc_time(
            self.hass, self._async_set_unavailable, utcnow() + TIME_TILL_UNAVAILABLE
        )
        if not self._is_available:
            self._is_available = True
            return True
        return False
    @callback
    def push_data(self, data, raw_data):
        """Push from Hub."""
        _LOGGER.debug("PUSH >> %s: %s", self, data)
        was_unavailable = self._async_track_unavailable()
        is_data = self.parse_data(data, raw_data)
        is_voltage = self.parse_voltage(data)
        # Only write state when something actually changed.
        if is_data or is_voltage or was_unavailable:
            self.async_write_ha_state()
    def parse_voltage(self, data):
        """Parse battery level data sent by gateway.

        Returns True when a voltage reading was found and the battery
        attributes updated, False otherwise.
        """
        if "voltage" in data:
            voltage_key = "voltage"
        elif "battery_voltage" in data:
            voltage_key = "battery_voltage"
        else:
            return False
        # Voltage appears to be reported in millivolts (divided by 1000
        # for the volt attribute) -- TODO confirm against hub payloads.
        max_volt = 3300
        min_volt = 2800
        voltage = data[voltage_key]
        self._extra_state_attributes[ATTR_VOLTAGE] = round(voltage / 1000.0, 2)
        # Clamp to the usable range before computing a percentage.
        voltage = min(voltage, max_volt)
        voltage = max(voltage, min_volt)
        percent = ((voltage - min_volt) / (max_volt - min_volt)) * 100
        self._extra_state_attributes[ATTR_BATTERY_LEVEL] = round(percent, 1)
        return True
    def parse_data(self, data, raw_data):
        """Parse data sent by gateway. Must be implemented by subclasses."""
        raise NotImplementedError()
def _add_gateway_to_schema(hass, schema):
    """Return *schema* extended with a sid-to-gateway validator."""

    def gateway(sid):
        """Resolve a (possibly colon-separated) sid to a gateway object."""
        sid = str(sid).replace(":", "").lower()
        hubs = hass.data[DOMAIN][GATEWAYS_KEY].values()
        for hub in hubs:
            if hub.sid == sid:
                return hub
        raise vol.Invalid(f"Unknown gateway sid {sid}")

    kwargs = {}
    xiaomi_data = hass.data.get(DOMAIN)
    if xiaomi_data is not None:
        gateways = list(xiaomi_data[GATEWAYS_KEY].values())
        # A lone configured gateway becomes the default for services.
        if len(gateways) == 1:
            kwargs["default"] = gateways[0].sid
    return schema.extend({vol.Required(ATTR_GW_MAC, **kwargs): gateway})
|
en
| 0.800112
|
Support for Xiaomi Gateways. Set up the Xiaomi component. Service to play ringtone through Gateway. Service to stop playing ringtone on Gateway. Service to add a new sub-device within the next 30 seconds. Service to remove a sub-device from the gateway. Set up the xiaomi aqara components from a config entry. # Connect to Xiaomi Aqara Gateway # start listening for local pushes (only once) # register stop callback to shutdown listening for local pushes Stop Xiaomi Socket. Unload a config entry. # No gateways left, stop Xiaomi socket Representation of a base Xiaomi device. Initialize the Xiaomi device. # pylint: disable=no-member # pylint: disable=no-member # this entity belongs to the gateway itself # this entity is connected through zigbee Start unavailability tracking. Return the name of the device. Return a unique ID. Return the device id of the Xiaomi Aqara device. Return the device info of the Xiaomi Aqara device. Return True if entity is available. Return the polling state. No polling needed. Return the state attributes. Set state to UNAVAILABLE. Push from Hub. Parse battery level data sent by gateway. Parse data sent by gateway. Extend a voluptuous schema with a gateway validator. Convert sid to a gateway. # If the user has only 1 gateway, make it the default for services.
| 1.767355
| 2
|
exercises/exercise_9_30_16.py
|
JSBCCA/pythoncode
| 0
|
6625572
|
<filename>exercises/exercise_9_30_16.py
from random import randint, choice, shuffle

# Character classes; a generated password contains at least one character
# from each. The raw string fixes the original's invalid "\/" escape
# (a SyntaxWarning on Python >= 3.12); the runtime value is unchanged.
CHAR_CLASSES = [
    "abcdefghijklmnopqrstuvwxyz",
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
    "0123456789",
    r"!@#$%&*_+>:;\/<?",
]


def generate_password():
    """Return a random password of length 8-12.

    One character is drawn from each class (in shuffled order), then
    4-8 extra characters are drawn from randomly chosen classes.
    NOTE: uses ``random``, which is not cryptographically secure; use
    the ``secrets`` module for real credentials.
    """
    classes = CHAR_CLASSES[:]
    shuffle(classes)
    password = "".join(choice(group) for group in classes)
    password += "".join(choice(choice(classes)) for _ in range(randint(4, 8)))
    return password


if __name__ == "__main__":
    print(generate_password())
|
<filename>exercises/exercise_9_30_16.py
from random import randint, choice, shuffle
password = ''
rand = randint(4, 8)
items = ["abcdefghijklmnopqrstuvwxyz", "ABCDEFGHIJKLMNOPQRSTUVWXYZ",
"0123456789", "!@#$%&*_+>:;\/<?"]
shuffle(items)
for string in items:
password += choice(string)
for i in range(rand):
password += choice(choice(items))
print(password)
|
ja
| 0.337682
|
#$%&*_+>:;\/<?"]
| 3.603919
| 4
|
python/Learn-Python-The-Hard-Way/hashmap.py
|
pepincho/playground
| 0
|
6625573
|
# exercise 39 -- a simple separate-chaining hash map built on lists.
# NOTE: ``get``, ``set`` and ``list`` intentionally mirror the original
# exercise API even though they shadow builtins of the same name.


def new(num_buckets=256):
    """Initializes a Map with the given number of buckets."""
    return [[] for _ in range(num_buckets)]


def hash_key(aMap, key):
    """Given a key this will create a number and then convert it to
    an index for the aMap's buckets."""
    return hash(key) % len(aMap)


def get_bucket(aMap, key):
    """Given a key, find the bucket where it would go."""
    return aMap[hash_key(aMap, key)]


def get_slot(aMap, key, default=None):
    """
    Returns the index, key, and value of a slot found in a bucket.
    Returns -1, key, and default (None if not set) when not found.
    """
    bucket = get_bucket(aMap, key)
    for i, (k, v) in enumerate(bucket):
        if key == k:
            return i, k, v
    return -1, key, default


def get(aMap, key, default=None):
    """Gets the value in a bucket for the given key, or the default."""
    _, _, v = get_slot(aMap, key, default=default)
    return v


def set(aMap, key, value):
    """Sets the key to the value, replacing any existing value."""
    bucket = get_bucket(aMap, key)
    i, _, _ = get_slot(aMap, key)
    if i >= 0:
        # the key exists, replace it
        bucket[i] = (key, value)
    else:
        # the key does not, append to create it
        bucket.append((key, value))


def delete(aMap, key):
    """Deletes the given key from the Map."""
    bucket = get_bucket(aMap, key)
    # BUG FIX: the original used Python 2's ``xrange``, which raises
    # NameError on Python 3 (the file otherwise targets Python 3).
    for i in range(len(bucket)):
        k, _ = bucket[i]
        if key == k:
            del bucket[i]
            break


def list(aMap):
    """Prints out what's in the Map."""
    for bucket in aMap:
        for k, v in bucket:
            print(k, v)
|
# exercise 39
def new(num_buckets = 256):
"""Initializes a Map with the given number of buckets."""
aMap = []
for i in range(0, num_buckets):
aMap.append([])
return aMap
def hash_key(aMap, key):
"""Given a key this will create a number and then convert to
an index for the aMap's buckets."""
return hash(key) % len(aMap)
def get_bucket(aMap, key):
"""Given a key, find the bucket where it would go."""
bucket_id = hash_key(aMap, key)
return aMap[bucket_id]
def get_slot(aMap, key, default = None):
"""
Returns the index, key, and value of a slot found in a bucket.
Returns -1, key, and default (None if not set) when not found.
"""
bucket = get_bucket(aMap, key)
for i, kv in enumerate(bucket):
k, v = kv
if key == k:
return i, k, v
return -1, key, default
def get(aMap, key, default = None):
"""Gets the value in a bucket for the given key, or the default."""
i, k, v = get_slot(aMap, key, default = default)
return v
def set(aMap, key, value):
"""Sets the key to the value, replacing any existing value."""
bucket = get_bucket(aMap, key)
i, k, v = get_slot(aMap, key)
if i >= 0:
# the key exists, replate it
bucket[i] = (key, value)
else:
# the key does not, append to create it
bucket.append((key, value))
def delete(aMap, key):
"""Deletes the given key from the Map."""
bucket = get_bucket(aMap, key)
for i in xrange(len(bucket)):
k, v = bucket[i]
if key == k:
del bucket[i]
break
def list(aMap):
"""Prints out what's in the Map."""
for bucket in aMap:
if bucket:
for k, v in bucket:
print (k, v)
|
en
| 0.81344
|
# exercise 39 Initializes a Map with the given number of buckets. Given a key this will create a number and then convert to an index for the aMap's buckets. Given a key, find the bucket where it would go. Returns the index, key, and value of a slot found in a bucket. Returns -1, key, and default (None if not set) when not found. Gets the value in a bucket for the given key, or the default. Sets the key to the value, replacing any existing value. # the key exists, replate it # the key does not, append to create it Deletes the given key from the Map. Prints out what's in the Map.
| 3.854462
| 4
|
ds_get/__init__.py
|
dekomote/ds_get
| 0
|
6625574
|
<reponame>dekomote/ds_get
"""Top-level package for DS PyGet."""
__author__ = """<NAME>"""
__email__ = "<EMAIL>"
__version__ = "0.1.0"
|
"""Top-level package for DS PyGet."""
__author__ = """<NAME>"""
__email__ = "<EMAIL>"
__version__ = "0.1.0"
|
en
| 0.573191
|
Top-level package for DS PyGet. <NAME>
| 1.020623
| 1
|
QtPyNetwork/server/QBalancedServer.py
|
desty2k/QtPyNetwork
| 0
|
6625575
|
from qtpy.QtCore import Slot, Signal, QObject, QThread, Qt
from qtpy.QtNetwork import QTcpSocket
import struct
import logging
from QtPyNetwork.server.BaseServer import QBaseServer
__all__ = ["QBalancedServer"]
class _SocketWorker(QObject):
    """_SocketWorker manages sockets and handles messages.

    Messages on the wire are length-prefixed with a 4-byte big-endian
    size header ('!L'); partial reads are reassembled per device.

    Outgoing signals:
        - disconnected (device_id: int): Client disconnected.
        - connected (device_id: int, ip: str, port: int): Client connected.
        - message (device_id: int, message: bytes): Message from client.
        - error (device_id: int, error: str): Error occurred.
        - closed (): Closed successfully.

    Incoming signals:
        - write (device_id: int, message: bytes): Emit to send message
          to client with ID in this worker.
        - write_all (message: bytes): Emit to send message
          to all clients in this worker.
        - kick (device_id: int): Emit to kick device with ID
          in this worker.
        - connection (device_id: int, socket_descriptor: int): Emit to create new socket
          object in this worker.
        - close_signal (): Emit to close all connections.
    """

    disconnected = Signal(int)
    connected = Signal(int, str, int)
    message = Signal(int, bytes)
    error = Signal(int, str)
    closed = Signal()
    connection = Signal(int, int)
    close_signal = Signal()
    write = Signal(int, bytes)
    write_all = Signal(bytes)
    kick = Signal(int)

    def __init__(self, parent=None):
        """Create the worker; real setup happens in start()."""
        super(_SocketWorker, self).__init__(parent)

    @Slot()
    def start(self) -> None:
        """Run socket worker.

        Invoked via QThread.started so every attribute (and every
        socket created later) lives in the worker thread. Signals that
        must finish before the emitter continues use
        Qt.BlockingQueuedConnection.
        """
        self.sockets = []  # noqa
        self.data = {}  # noqa  # per-device reassembly buffers for partial messages
        self.__logger = logging.getLogger(self.__class__.__name__)  # noqa
        self.connection.connect(self.on_connection, Qt.BlockingQueuedConnection)  # noqa
        self.close_signal.connect(self.close, Qt.BlockingQueuedConnection)
        self.write.connect(self._write)
        self.write_all.connect(self._write_all)
        self.kick.connect(self._kick)

    @Slot(int)
    def _kick(self, device_id):
        """Executed inside worker thread after emitting kick signal.

        Args:
            device_id (int): ID of device to kick.
        """
        socket = self.get_socket_by_id(device_id)
        if socket:
            socket.close()
            return
        self.__logger.error("Could not find socket with ID: {}!".format(device_id))

    @Slot(int, bytes)
    def _write(self, device_id: int, message: bytes):
        """Send task to client. DO NOT USE THIS! Emit write signal instead!

        Args:
            device_id (int): Client ID.
            message (bytes): Message to send.
        """
        socket = self.get_socket_by_id(int(device_id))
        if socket:
            # Prepend the 4-byte big-endian length header.
            message = struct.pack('!L', len(message)) + message
            socket.write(message)
            socket.flush()
        else:
            self.__logger.warning("Could not find socket with specified ID.")

    @Slot(bytes)
    def _write_all(self, message: bytes):
        """Send task to all connected clients. DO NOT USE THIS! Emit write_all signal instead!

        Args:
            message (bytes): Message to send.
        """
        for socket in self.sockets:
            message = struct.pack('!L', len(message)) + message
            socket.write(message)
            socket.flush()

    @Slot(int)
    def get_socket_by_id(self, device_id: int):
        """Returns socket object associated to provided ID, or None.

        Args:
            device_id (int): Socket ID.
        """
        for conn in self.sockets:
            if int(conn.objectName()) == int(device_id):
                return conn
        return None

    @Slot()
    def socket_count(self):
        """Returns amount of active sockets."""
        return len(self.sockets)

    @Slot()
    def used_ids(self):
        """Returns IDs used by this worker."""
        return [int(x.objectName()) for x in self.sockets]

    @Slot(int)
    def has_device_id(self, device_id: int):
        """Check if this thread has socket with ID.

        Args:
            device_id (int): Socket ID.
        """
        return int(device_id) in [int(socket.objectName()) for socket in self.sockets]

    @Slot(int, int)
    def on_connection(self, device_id: int, socket_descriptor: int):
        """Create new QTcpSocket object and setup connection with client.

        The device ID is stored as the socket's objectName.

        Args:
            device_id (int): Socket ID.
            socket_descriptor (int): Socket descriptor.
        Note:
            Emits connected signal. If adopting the descriptor fails the
            connection is silently dropped.
        """
        socket = QTcpSocket()
        socket.setParent(None)  # noqa
        if socket.setSocketDescriptor(socket_descriptor):  # noqa
            socket.readyRead.connect(lambda: self.on_message(socket))  # noqa
            socket.disconnected.connect(lambda: self.on_disconnected(socket))  # noqa
            socket.error.connect(lambda: self.on_error(socket))  # noqa
            socket.setObjectName(str(device_id))
            self.sockets.append(socket)
            self.__logger.debug("New connection from CLIENT-{} "
                                "IP: {}:{}".format(socket.objectName(),
                                                   socket.peerAddress().toString(),
                                                   socket.peerPort()))
            self.connected.emit(int(socket.objectName()), socket.peerAddress().toString(), socket.peerPort())

    @Slot(QTcpSocket)
    def on_message(self, conn):
        """Handle socket messages.

        Reads the 4-byte length header, then buffers partial payloads in
        self.data until the full message has arrived.

        Note:
            Emits message signal.
        """
        device_id = int(conn.objectName())
        while conn.bytesAvailable():
            if device_id in self.data:
                # Continue reassembling a partially received message.
                size_left = self.data.get(device_id).get("size_left")
                message = conn.read(size_left)
                size_left = size_left - len(message)
                if size_left > 0:
                    self.data[device_id]["size_left"] = size_left
                    self.data[device_id]["data"] += message
                else:
                    message = self.data.get(device_id).get("data") + message
                    del self.data[device_id]
                    self.message.emit(device_id, message)
            else:
                header_size = struct.calcsize('!L')
                header = conn.read(header_size)
                if len(header) == 4:
                    msg_size = struct.unpack('!L', header)[0]
                    message = conn.read(msg_size)
                    if len(message) < msg_size:
                        # Payload incomplete; buffer what we have.
                        msg_size = msg_size - len(message)
                        self.data[device_id] = {"data": message, "size_left": msg_size}
                    else:
                        self.message.emit(device_id, message)

    @Slot(QTcpSocket)
    def on_disconnected(self, conn):
        """Handle socket disconnection.

        Args:
            conn (QTcpSocket): Socket object.
        Note:
            Emits disconnected signal.
        """
        device_id = int(conn.objectName())
        if conn in self.sockets:
            try:
                conn.close()
                self.sockets.remove(conn)
            except RuntimeError:
                # Underlying C++ object may already be gone.
                pass
        if device_id in self.data:
            # Drop any half-assembled message for this device.
            del self.data[device_id]
        self.__logger.info("CLIENT-{} Disconnected {}: {}:{}".format(device_id, conn.peerName(),
                                                                     conn.peerAddress().toString(),
                                                                     conn.peerPort()))
        self.disconnected.emit(device_id)

    @Slot(QTcpSocket)
    def on_error(self, conn: QTcpSocket):
        """Handle socket errors.

        Args:
            conn (QTcpSocket): Socket object.
        Note:
            Emits error signal.
        """
        device_id = int(conn.objectName())
        e = conn.errorString()
        self.__logger.error("CLIENT-{} Error: {}".format(device_id, e))
        self.error.emit(device_id, str(e))

    @Slot()
    def close(self):
        """Close all connections.

        Note:
            Emits closed signal.
        """
        # BUG FIX: the original removed sockets from ``self.sockets``
        # while iterating over it, which skips every other socket and
        # leaves those connections open. Iterate over a snapshot.
        for conn in list(self.sockets):
            conn.close()
            try:
                conn.deleteLater()
                self.sockets.remove(conn)
            except ValueError:
                pass
        self.closed.emit()
class _BalancedSocketHandler(QObject):
    """Creates socket handler threads. New sockets
    are passed to the worker with the least load.

    Outgoing signals:
        - started (): Handler started.
        - closed (): Handler closed all connections.
        - message (client_id: int, message: bytes): Message received.
        - error (client_id: int, error: str): Socket error.
        - disconnected (client_id: int): Client disconnected.

    Incoming signals:
        - write (device_id: int, message: bytes): Emit to send message
          to client with ID.
        - write_all (message: bytes): Emit to send message
          to all clients.
        - kick (device_id: int): Emit to kick client with ID.
        - close_signal (): Emit to close all connections.
    """
    started = Signal()
    closed = Signal()
    connected = Signal(int, str, int)
    message = Signal(int, bytes)
    error = Signal(int, str)
    disconnected = Signal(int)
    write = Signal(int, bytes)
    write_all = Signal(bytes)
    kick = Signal(int)
    close_signal = Signal()
    def __init__(self, cores=None):
        """Initialize the handler.

        Args:
            cores: Number of worker threads; falls back to
                QThread.idealThreadCount() in start() when falsy.
        """
        super(_BalancedSocketHandler, self).__init__(None)
        self.cores = cores
    @Slot()
    def start(self):
        """Start server and create socket workers.

        Attributes are created here (not in __init__) so they live in
        the thread this handler has been moved to.
        """
        self.__logger = logging.getLogger(self.__class__.__name__) # noqa
        self.workers = [] # noqa
        self.threads = [] # noqa
        if not self.cores:
            self.cores = QThread.idealThreadCount()
        self.close_signal.connect(self.close, Qt.BlockingQueuedConnection)
        self.write.connect(self._write)
        self.write_all.connect(self._write_all)
        self.kick.connect(self._kick)
        self.__logger.debug("Allocating {} worker threads...".format(self.cores))
        try:
            for i in range(self.cores):
                self.create_worker()
            self.__logger.info("Started worker threads!")
            self.__logger.debug("Active socket workers: {}".format(sum([1 for x in self.threads if x.isRunning()])))
            self.started.emit()
        except Exception as e:
            # Roll back: tear down whatever workers did start.
            self.__logger.error("Failed to start socket handler: {}".format(e))
            self.close()
    @Slot()
    def create_worker(self):
        """Creates new socket worker in thread."""
        thread = QThread()
        worker = _SocketWorker()
        worker.moveToThread(thread)
        # Re-emit worker events on this handler's own signals.
        worker.connected.connect(self.connected.emit) # noqa
        worker.message.connect(self.message.emit) # noqa
        worker.disconnected.connect(self.disconnected.emit) # noqa
        worker.error.connect(self.error.emit) # noqa
        thread.started.connect(worker.start) # noqa
        worker.closed.connect(thread.quit) # noqa
        worker.closed.connect(thread.wait) # noqa
        self.workers.append(worker)
        self.threads.append(thread)
        thread.start()
    @Slot(int)
    def on_incoming_connection(self, socket_descriptor: int) -> None:
        """Select thread with least sockets and setup connection.
        Assign not used ID.

        Args:
            socket_descriptor (int): Socket descriptor.
        """
        # NOTE(review): socket_count()/used_ids() are direct method calls
        # into worker objects living in other threads -- confirm this is
        # safe for the access patterns involved.
        count_list = [x.socket_count() for x in self.workers]
        worker_id = count_list.index(min(count_list))
        device_id = self.get_free_id()
        self.workers[worker_id].connection.emit(device_id, socket_descriptor)
    @Slot(int, bytes)
    def _write(self, device_id: int, message: bytes) -> None:
        """Write to client with ID.

        Args:
            device_id (int): Client ID.
            message (bytes): Message.
        """
        for worker in self.workers:
            if worker.has_device_id(device_id):
                worker.write.emit(device_id, message)
                return
        self.__logger.error("Could not find client with ID: {}!".format(device_id))
    @Slot(int)
    def _kick(self, device_id: int) -> None:
        """Kick client with ID.

        Args:
            device_id (int): Client ID.
        """
        for worker in self.workers:
            if worker.has_device_id(device_id):
                worker.kick.emit(device_id)
                return
        self.__logger.error("Could not find client with ID: {}!".format(device_id))
    @Slot(bytes)
    def _write_all(self, message: bytes) -> None:
        """Write to all clients.

        Args:
            message (bytes): Message.
        """
        for worker in self.workers:
            worker.write_all.emit(message)
    @Slot()
    def get_free_id(self) -> int:
        """Returns the lowest device ID not currently in use (>= 1)."""
        used = []
        for i in self.workers:
            used = used + i.used_ids()
        used = sorted(used)
        if len(used) > 0:
            maxid = max(used)
            # Reuse the smallest gap below the current maximum, if any.
            for i in range(1, maxid):
                if i not in used:
                    return i
            return maxid + 1
        else:
            return 1
    @Slot()
    def close(self) -> None:
        """Close server and all socket handlers.

        Note:
            Emits closed signal when successfully closed.
        """
        for worker in self.workers:
            # close_signal uses a blocking queued connection, so the
            # worker's close() has finished before deleteLater is queued.
            worker.close_signal.emit()
            worker.deleteLater()
        for thread in self.threads:
            thread.quit()
        self.__logger.debug("Socket handler closed successfully")
        self.closed.emit()
class QBalancedServer(QBaseServer):
    """TCP server backed by a fixed pool of worker threads.

    Each newly connecting client is handed (by socket descriptor) to the
    worker thread currently holding the fewest active sockets, keeping
    the load evenly balanced across the pool.
    """

    def __init__(self, *args, **kwargs):
        """Initialize the base server and install the balanced handler."""
        super().__init__(*args, **kwargs)
        self.set_handler_class(_BalancedSocketHandler)
|
from qtpy.QtCore import Slot, Signal, QObject, QThread, Qt
from qtpy.QtNetwork import QTcpSocket
import struct
import logging
from QtPyNetwork.server.BaseServer import QBaseServer
__all__ = ["QBalancedServer"]
class _SocketWorker(QObject):
"""_SocketWorker manages sockets and handles messages.
Outgoing signals:
- disconnected (device_id: int): Client disconnected.
- connected (device_id: int, ip: str, port: int): Client connected.
- message (device_id: int, message: bytes): Message from client.
- error (device_id: int, error: str): Error occured.
- closed (): Closed successfully.
Incomming signals:
- write (device_id: int, message: bytes): Emit to send message
to client with ID in this worker.
- write_all (message: bytes): Emit to send message
to all clients in this worker.
- kick (device_id: int): Emit to kick device with ID
in this worker.
- connection (device_id: int, socket_descriptor: int): Emit to create new socket
object in this worker.
- close_signal (): Emit to close all connections.
"""
disconnected = Signal(int)
connected = Signal(int, str, int)
message = Signal(int, bytes)
error = Signal(int, str)
closed = Signal()
connection = Signal(int, int)
close_signal = Signal()
write = Signal(int, bytes)
write_all = Signal(bytes)
kick = Signal(int)
def __init__(self, parent=None):
super(_SocketWorker, self).__init__(parent)
@Slot()
def start(self) -> None:
"""Run socket worker."""
self.sockets = [] # noqa
self.data = {} # noqa
self.__logger = logging.getLogger(self.__class__.__name__) # noqa
self.connection.connect(self.on_connection, Qt.BlockingQueuedConnection) # noqa
self.close_signal.connect(self.close, Qt.BlockingQueuedConnection)
self.write.connect(self._write)
self.write_all.connect(self._write_all)
self.kick.connect(self._kick)
@Slot(int)
def _kick(self, device_id):
"""Executed inside worker thread after emitting kick signal.
Args:
device_id (int): ID of device to kick.
"""
socket = self.get_socket_by_id(device_id)
if socket:
socket.close()
return
self.__logger.error("Could not find socket with ID: {}!".format(device_id))
@Slot(int, bytes)
def _write(self, device_id: int, message: bytes):
"""Send task to client. DO NOT USE THIS! Emit write signal instead!
Args:
device_id (int): Client ID.
message (bytes): Message to send.
"""
socket = self.get_socket_by_id(int(device_id))
if socket:
message = struct.pack('!L', len(message)) + message
socket.write(message)
socket.flush()
else:
self.__logger.warning("Could not find socket with specified ID.")
@Slot(bytes)
def _write_all(self, message: bytes):
"""Send task to all connected clients. DO NOT USE THIS! Emit write_all signal instead!
Args:
message (bytes): Message to send.
"""
for socket in self.sockets:
message = struct.pack('!L', len(message)) + message
socket.write(message)
socket.flush()
@Slot(int)
def get_socket_by_id(self, device_id: int):
"""Returns socket object associated to provided ID.
Args:
device_id (int): Socket ID.
"""
for conn in self.sockets:
if int(conn.objectName()) == int(device_id):
return conn
return None
@Slot()
def socket_count(self):
"""Returns amount of active sockets."""
return len(self.sockets)
@Slot()
def used_ids(self):
"""Returns IDs used by this worker."""
return [int(x.objectName()) for x in self.sockets]
@Slot(int)
def has_device_id(self, device_id: int):
"""Check if this thread has socket with ID.
Args:
device_id (int): Socket ID.
"""
return int(device_id) in [int(socket.objectName()) for socket in self.sockets]
@Slot(int, int)
def on_connection(self, device_id: int, socket_descriptor: int):
"""Create new QTcpSocket object and setup connection with client.
Args:
device_id (int): Socket ID.
socket_descriptor (int) Socket descriptor.
Note:
Emits connected signal.
"""
socket = QTcpSocket()
socket.setParent(None) # noqa
if socket.setSocketDescriptor(socket_descriptor): # noqa
socket.readyRead.connect(lambda: self.on_message(socket)) # noqa
socket.disconnected.connect(lambda: self.on_disconnected(socket)) # noqa
socket.error.connect(lambda: self.on_error(socket)) # noqa
socket.setObjectName(str(device_id))
self.sockets.append(socket)
self.__logger.debug("New connection from CLIENT-{} "
"IP: {}:{}".format(socket.objectName(),
socket.peerAddress().toString(),
socket.peerPort()))
self.connected.emit(int(socket.objectName()), socket.peerAddress().toString(), socket.peerPort())
@Slot(QTcpSocket)
def on_message(self, conn):
"""Handle socket messages.
Note:
Emits message signal.
"""
device_id = int(conn.objectName())
while conn.bytesAvailable():
if device_id in self.data:
size_left = self.data.get(device_id).get("size_left")
message = conn.read(size_left)
size_left = size_left - len(message)
if size_left > 0:
self.data[device_id]["size_left"] = size_left
self.data[device_id]["data"] += message
else:
message = self.data.get(device_id).get("data") + message
del self.data[device_id]
self.message.emit(device_id, message)
else:
header_size = struct.calcsize('!L')
header = conn.read(header_size)
if len(header) == 4:
msg_size = struct.unpack('!L', header)[0]
message = conn.read(msg_size)
if len(message) < msg_size:
msg_size = msg_size - len(message)
self.data[device_id] = {"data": message, "size_left": msg_size}
else:
self.message.emit(device_id, message)
@Slot(QTcpSocket)
def on_disconnected(self, conn):
"""Handle socket disconnection.
Args:
conn (QTcpSocket): Socket object.
Note:
Emits disconnected signal.
"""
device_id = int(conn.objectName())
if conn in self.sockets:
try:
conn.close()
self.sockets.remove(conn)
except RuntimeError:
pass
if device_id in self.data:
del self.data[device_id]
self.__logger.info("CLIENT-{} Disconnected {}: {}:{}".format(device_id, conn.peerName(),
conn.peerAddress().toString(),
conn.peerPort()))
self.disconnected.emit(device_id)
@Slot(QTcpSocket)
def on_error(self, conn: QTcpSocket):
"""Handle socket errors.
Args:
conn (QTcpSocket): Socket object.
Note:
Emits error signal.
"""
device_id = int(conn.objectName())
e = conn.errorString()
self.__logger.error("CLIENT-{} Error: {}".format(device_id, e))
self.error.emit(device_id, str(e))
@Slot()
def close(self):
"""Close all connections.
Note:
Emits closed signal.
"""
for conn in self.sockets:
conn.close()
try:
conn.deleteLater()
self.sockets.remove(conn)
except ValueError:
pass
self.closed.emit()
class _BalancedSocketHandler(QObject):
"""Creates socket handlers threads. New sockets
are passed to worker with least load.
Outgoing signals:
- started (): Handler started.
- closed (): Handler closed all connections.
- message (client_id: int, message: bytes): Message received.
- error (client_id: int, error: str): Socket error.
- disconnected (client_id: int): Client disconnected.
Incomming signals:
- write (device_id: int, message: bytes): Emit to send message
to client with ID.
- write_all (message: bytes): Emit to send message
to all clients.
- kick (device_id: int): Emit to kick client with ID.
- close_signal (): Emit to close all connections.
"""
started = Signal()
closed = Signal()
connected = Signal(int, str, int)
message = Signal(int, bytes)
error = Signal(int, str)
disconnected = Signal(int)
write = Signal(int, bytes)
write_all = Signal(bytes)
kick = Signal(int)
close_signal = Signal()
def __init__(self, cores=None):
super(_BalancedSocketHandler, self).__init__(None)
self.cores = cores
@Slot()
def start(self):
"""Start server and create socket workers."""
self.__logger = logging.getLogger(self.__class__.__name__) # noqa
self.workers = [] # noqa
self.threads = [] # noqa
if not self.cores:
self.cores = QThread.idealThreadCount()
self.close_signal.connect(self.close, Qt.BlockingQueuedConnection)
self.write.connect(self._write)
self.write_all.connect(self._write_all)
self.kick.connect(self._kick)
self.__logger.debug("Allocating {} worker threads...".format(self.cores))
try:
for i in range(self.cores):
self.create_worker()
self.__logger.info("Started worker threads!")
self.__logger.debug("Active socket workers: {}".format(sum([1 for x in self.threads if x.isRunning()])))
self.started.emit()
except Exception as e:
self.__logger.error("Failed to start socket handler: {}".format(e))
self.close()
@Slot()
def create_worker(self):
"""Creates new socket worker in thread."""
thread = QThread()
worker = _SocketWorker()
worker.moveToThread(thread)
worker.connected.connect(self.connected.emit) # noqa
worker.message.connect(self.message.emit) # noqa
worker.disconnected.connect(self.disconnected.emit) # noqa
worker.error.connect(self.error.emit) # noqa
thread.started.connect(worker.start) # noqa
worker.closed.connect(thread.quit) # noqa
worker.closed.connect(thread.wait) # noqa
self.workers.append(worker)
self.threads.append(thread)
thread.start()
@Slot(int)
def on_incoming_connection(self, socket_descriptor: int) -> None:
"""Select thread with least sockets and setup connection.
Assign not used ID.
Args:
socket_descriptor (int): Socket descriptor.
"""
count_list = [x.socket_count() for x in self.workers]
worker_id = count_list.index(min(count_list))
device_id = self.get_free_id()
self.workers[worker_id].connection.emit(device_id, socket_descriptor)
@Slot(int, bytes)
def _write(self, device_id: int, message: bytes) -> None:
"""Write to client with ID.
Args:
device_id (int): Client ID.
message (bytes): Message.
"""
for worker in self.workers:
if worker.has_device_id(device_id):
worker.write.emit(device_id, message)
return
self.__logger.error("Could not find client with ID: {}!".format(device_id))
@Slot(int)
def _kick(self, device_id: int) -> None:
"""Kick client with ID.
Args:
device_id (int): Client ID.
"""
for worker in self.workers:
if worker.has_device_id(device_id):
worker.kick.emit(device_id)
return
self.__logger.error("Could not find client with ID: {}!".format(device_id))
@Slot(bytes)
def _write_all(self, message: bytes) -> None:
"""Write to all clients
Args:
message (bytes): Message.
"""
for worker in self.workers:
worker.write_all.emit(message)
@Slot()
def get_free_id(self) -> int:
"""Returns not used device ID."""
used = []
for i in self.workers:
used = used + i.used_ids()
used = sorted(used)
if len(used) > 0:
maxid = max(used)
for i in range(1, maxid):
if i not in used:
return i
return maxid + 1
else:
return 1
@Slot()
def close(self) -> None:
"""Close server and all socket handlers.
Note:
Emits closed signal when successfully closed.
"""
for worker in self.workers:
worker.close_signal.emit()
worker.deleteLater()
for thread in self.threads:
thread.quit()
self.__logger.debug("Socket handler closed successfully")
self.closed.emit()
class QBalancedServer(QBaseServer):
"""TCP server with constant amount of threads. When new client connects, server
checks which worker has the least amount of active sockets and passes socket
descriptor to that thread."""
def __init__(self, *args, **kwargs):
super(QBalancedServer, self).__init__(*args, **kwargs)
self.set_handler_class(_BalancedSocketHandler)
|
en
| 0.773835
|
_SocketWorker manages sockets and handles messages. Outgoing signals: - disconnected (device_id: int): Client disconnected. - connected (device_id: int, ip: str, port: int): Client connected. - message (device_id: int, message: bytes): Message from client. - error (device_id: int, error: str): Error occured. - closed (): Closed successfully. Incomming signals: - write (device_id: int, message: bytes): Emit to send message to client with ID in this worker. - write_all (message: bytes): Emit to send message to all clients in this worker. - kick (device_id: int): Emit to kick device with ID in this worker. - connection (device_id: int, socket_descriptor: int): Emit to create new socket object in this worker. - close_signal (): Emit to close all connections. Run socket worker. # noqa # noqa # noqa # noqa Executed inside worker thread after emitting kick signal. Args: device_id (int): ID of device to kick. Send task to client. DO NOT USE THIS! Emit write signal instead! Args: device_id (int): Client ID. message (bytes): Message to send. Send task to all connected clients. DO NOT USE THIS! Emit write_all signal instead! Args: message (bytes): Message to send. Returns socket object associated to provided ID. Args: device_id (int): Socket ID. Returns amount of active sockets. Returns IDs used by this worker. Check if this thread has socket with ID. Args: device_id (int): Socket ID. Create new QTcpSocket object and setup connection with client. Args: device_id (int): Socket ID. socket_descriptor (int) Socket descriptor. Note: Emits connected signal. # noqa # noqa # noqa # noqa # noqa Handle socket messages. Note: Emits message signal. Handle socket disconnection. Args: conn (QTcpSocket): Socket object. Note: Emits disconnected signal. Handle socket errors. Args: conn (QTcpSocket): Socket object. Note: Emits error signal. Close all connections. Note: Emits closed signal. Creates socket handlers threads. New sockets are passed to worker with least load. 
Outgoing signals: - started (): Handler started. - closed (): Handler closed all connections. - message (client_id: int, message: bytes): Message received. - error (client_id: int, error: str): Socket error. - disconnected (client_id: int): Client disconnected. Incomming signals: - write (device_id: int, message: bytes): Emit to send message to client with ID. - write_all (message: bytes): Emit to send message to all clients. - kick (device_id: int): Emit to kick client with ID. - close_signal (): Emit to close all connections. Start server and create socket workers. # noqa # noqa # noqa Creates new socket worker in thread. # noqa # noqa # noqa # noqa # noqa # noqa # noqa Select thread with least sockets and setup connection. Assign not used ID. Args: socket_descriptor (int): Socket descriptor. Write to client with ID. Args: device_id (int): Client ID. message (bytes): Message. Kick client with ID. Args: device_id (int): Client ID. Write to all clients Args: message (bytes): Message. Returns not used device ID. Close server and all socket handlers. Note: Emits closed signal when successfully closed. TCP server with constant amount of threads. When new client connects, server checks which worker has the least amount of active sockets and passes socket descriptor to that thread.
| 2.554064
| 3
|
modules/trainer/updater/mmd.py
|
nobodykid/sinkhorngan-positive
| 0
|
6625576
|
from base import BaseUpdater
class GAN(BaseUpdater):
n_critic = 5
def update_parameters(self, idx, x, y):
if self.get_counter('generator') < 25 or self.get_counter('generator') % 500 == 0:
self.n_critic = 100
else:
self.n_critic = 5
loss_generator = None
loss_critic = None
try:
for p in self.model["critic"].encoder.parameters():
p.data.clamp_(-0.01, 0.01)
except:
for p in self.model["critic"].parameters():
p.data.clamp_(-0.01, 0.01)
# Init noise
z_mean = self.configs.get('mean', 0)
z_std = self.configs.get('std', 1)
z = self.Tensor.randn(x.shape[0], self.configs['z_dim'], mean=z_mean, std=z_std)
# Set critic grad true
for p in self.model["critic"].parameters():
p.requires_grad = True
self.model.zero_grad("critic")
loss_critic = self.trainer.critic['criterion'](z=z, x=x, y=y)
loss_critic.backward(self.Tensor.MoneFloat)
self.model.step("critic")
self.model.zero_grad("generator")
if self.get_counter('critic') % self.n_critic == 0:
# Set critic grad false
for p in self.model["critic"].parameters():
p.requires_grad = False
loss_generator = self.trainer.generator['criterion'](z=z, x=x, y=y)
loss_generator.backward(self.Tensor.OneFloat)
self.model.step("generator")
logs_scalar = {
'generator_loss_total': loss_generator.item() if loss_generator else None,
'critic_loss_total': loss_critic.item()
}
self.logger(Scalar=logs_scalar)
|
from base import BaseUpdater
class GAN(BaseUpdater):
n_critic = 5
def update_parameters(self, idx, x, y):
if self.get_counter('generator') < 25 or self.get_counter('generator') % 500 == 0:
self.n_critic = 100
else:
self.n_critic = 5
loss_generator = None
loss_critic = None
try:
for p in self.model["critic"].encoder.parameters():
p.data.clamp_(-0.01, 0.01)
except:
for p in self.model["critic"].parameters():
p.data.clamp_(-0.01, 0.01)
# Init noise
z_mean = self.configs.get('mean', 0)
z_std = self.configs.get('std', 1)
z = self.Tensor.randn(x.shape[0], self.configs['z_dim'], mean=z_mean, std=z_std)
# Set critic grad true
for p in self.model["critic"].parameters():
p.requires_grad = True
self.model.zero_grad("critic")
loss_critic = self.trainer.critic['criterion'](z=z, x=x, y=y)
loss_critic.backward(self.Tensor.MoneFloat)
self.model.step("critic")
self.model.zero_grad("generator")
if self.get_counter('critic') % self.n_critic == 0:
# Set critic grad false
for p in self.model["critic"].parameters():
p.requires_grad = False
loss_generator = self.trainer.generator['criterion'](z=z, x=x, y=y)
loss_generator.backward(self.Tensor.OneFloat)
self.model.step("generator")
logs_scalar = {
'generator_loss_total': loss_generator.item() if loss_generator else None,
'critic_loss_total': loss_critic.item()
}
self.logger(Scalar=logs_scalar)
|
en
| 0.272483
|
# Init noise # Set critic grad true # Set critic grad false
| 2.198056
| 2
|
tests/core/helpers.py
|
cd4761/ecc-py-evm
| 1
|
6625577
|
<filename>tests/core/helpers.py
import sys
from eth_utils.toolz import curry
import pytest
from eth_utils import (
decode_hex,
ValidationError,
)
from eth.chains.base import MiningChain
from eth.vm.spoof import (
SpoofTransaction,
)
greater_equal_python36 = pytest.mark.skipif(
sys.version_info < (3, 6),
reason="requires python3.6 or higher"
)
@curry
def new_transaction(
vm,
from_,
to,
amount=0,
private_key=None,
gas_price=10,
gas=100000,
data=b'',
chain_id=None):
"""
Create and return a transaction sending amount from <from_> to <to>.
The transaction will be signed with the given private key.
"""
nonce = vm.state.get_nonce(from_)
tx = vm.create_unsigned_transaction(
nonce=nonce,
gas_price=gas_price,
gas=gas,
to=to,
value=amount,
data=data,
)
if private_key:
if chain_id is None:
return tx.as_signed_transaction(private_key)
else:
return tx.as_signed_transaction(private_key, chain_id=chain_id)
else:
return SpoofTransaction(tx, from_=from_)
def fill_block(chain, from_, key, gas, data):
if not isinstance(chain, MiningChain):
pytest.skip("Cannot fill block automatically unless using a MiningChain")
return
recipient = decode_hex('0xa94f5374fce5edbc8e2a8697c15331677e6ebf0c')
amount = 100
vm = chain.get_vm()
assert vm.get_header().gas_used == 0
while True:
tx = new_transaction(chain.get_vm(), from_, recipient, amount, key, gas=gas, data=data)
try:
chain.apply_transaction(tx)
except ValidationError as exc:
if str(exc).startswith("Transaction exceeds gas limit"):
break
else:
raise exc
assert chain.get_vm().get_block().header.gas_used > 0
|
<filename>tests/core/helpers.py
import sys
from eth_utils.toolz import curry
import pytest
from eth_utils import (
decode_hex,
ValidationError,
)
from eth.chains.base import MiningChain
from eth.vm.spoof import (
SpoofTransaction,
)
greater_equal_python36 = pytest.mark.skipif(
sys.version_info < (3, 6),
reason="requires python3.6 or higher"
)
@curry
def new_transaction(
vm,
from_,
to,
amount=0,
private_key=None,
gas_price=10,
gas=100000,
data=b'',
chain_id=None):
"""
Create and return a transaction sending amount from <from_> to <to>.
The transaction will be signed with the given private key.
"""
nonce = vm.state.get_nonce(from_)
tx = vm.create_unsigned_transaction(
nonce=nonce,
gas_price=gas_price,
gas=gas,
to=to,
value=amount,
data=data,
)
if private_key:
if chain_id is None:
return tx.as_signed_transaction(private_key)
else:
return tx.as_signed_transaction(private_key, chain_id=chain_id)
else:
return SpoofTransaction(tx, from_=from_)
def fill_block(chain, from_, key, gas, data):
if not isinstance(chain, MiningChain):
pytest.skip("Cannot fill block automatically unless using a MiningChain")
return
recipient = decode_hex('0xa94f5374fce5edbc8e2a8697c15331677e6ebf0c')
amount = 100
vm = chain.get_vm()
assert vm.get_header().gas_used == 0
while True:
tx = new_transaction(chain.get_vm(), from_, recipient, amount, key, gas=gas, data=data)
try:
chain.apply_transaction(tx)
except ValidationError as exc:
if str(exc).startswith("Transaction exceeds gas limit"):
break
else:
raise exc
assert chain.get_vm().get_block().header.gas_used > 0
|
en
| 0.872475
|
Create and return a transaction sending amount from <from_> to <to>. The transaction will be signed with the given private key.
| 2.046947
| 2
|
core/api.py
|
uktrade/lite-internal-frontend
| 4
|
6625578
|
from django.http import JsonResponse
from django.views.generic import TemplateView
from queues.services import get_cases_search_data
class Cases(TemplateView):
def get(self, request, **kwargs):
"""
Endpoint to enable access to the API /cases/ endpoint
"""
hidden = request.GET.get("hidden")
params = {"page": int(request.GET.get("page", 1))}
for key, value in request.GET.items():
if key != "flags[]":
params[key] = value
params["flags"] = request.GET.getlist("flags[]", [])
if hidden:
params["hidden"] = hidden
data = get_cases_search_data(request, kwargs["pk"], params)
return JsonResponse(data=data)
|
from django.http import JsonResponse
from django.views.generic import TemplateView
from queues.services import get_cases_search_data
class Cases(TemplateView):
def get(self, request, **kwargs):
"""
Endpoint to enable access to the API /cases/ endpoint
"""
hidden = request.GET.get("hidden")
params = {"page": int(request.GET.get("page", 1))}
for key, value in request.GET.items():
if key != "flags[]":
params[key] = value
params["flags"] = request.GET.getlist("flags[]", [])
if hidden:
params["hidden"] = hidden
data = get_cases_search_data(request, kwargs["pk"], params)
return JsonResponse(data=data)
|
en
| 0.580956
|
Endpoint to enable access to the API /cases/ endpoint
| 2.150312
| 2
|
unknowntags/models.py
|
leonrenkema/makerspaceleiden-crm
| 5
|
6625579
|
<reponame>leonrenkema/makerspaceleiden-crm
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.utils import timezone
from members.models import Tag, clean_tag_string
from acl.models import Entitlement, PermitType
import datetime
class Unknowntag(models.Model):
tag = models.CharField(max_length=30)
last_used = models.DateTimeField(auto_now=True)
def save(self, *args, **kwargs):
days = settings.UT_DAYS_CUTOFF
cutoff = timezone.now() - datetime.timedelta(days=days)
stale_tags = Unknowntag.objects.all().filter(Q(last_used__lt=cutoff))
for stale_tag in stale_tags:
stale_tag.delete()
return super(Unknowntag, self).save(*args, **kwargs)
def __str__(self):
return "{} swiped on {}".format(
self.tag, self.last_used.strftime("%Y-%m-%d %H:%M:%S")
)
def reassing_to_user(self, user, admin, activate=False):
newtag = Tag.objects.create(
tag=self.tag,
owner=user,
description="The card that was added on {} by {} ".format(
datetime.date.today(), admin
),
)
newtag.changeReason = (
"Moved from the unknown tags list by {} to this user.".format(admin)
)
newtag.save()
self.changeReason = "Reassigned to user {} by {}".format(user, admin)
self.delete()
if activate:
doors = PermitType.objects.get(pk=settings.DOORS)
e, created = Entitlement.objects.get_or_create(
active=True, permit=doors, holder=user, issuer=admin
)
if created:
e.changeReason = "Auto created during reasign of what was an unknown tag to {} by {}".format(
user, admin
)
e.save()
return newtag
|
from django.conf import settings
from django.db import models
from django.db.models import Q
from django.utils import timezone
from members.models import Tag, clean_tag_string
from acl.models import Entitlement, PermitType
import datetime
class Unknowntag(models.Model):
tag = models.CharField(max_length=30)
last_used = models.DateTimeField(auto_now=True)
def save(self, *args, **kwargs):
days = settings.UT_DAYS_CUTOFF
cutoff = timezone.now() - datetime.timedelta(days=days)
stale_tags = Unknowntag.objects.all().filter(Q(last_used__lt=cutoff))
for stale_tag in stale_tags:
stale_tag.delete()
return super(Unknowntag, self).save(*args, **kwargs)
def __str__(self):
return "{} swiped on {}".format(
self.tag, self.last_used.strftime("%Y-%m-%d %H:%M:%S")
)
def reassing_to_user(self, user, admin, activate=False):
newtag = Tag.objects.create(
tag=self.tag,
owner=user,
description="The card that was added on {} by {} ".format(
datetime.date.today(), admin
),
)
newtag.changeReason = (
"Moved from the unknown tags list by {} to this user.".format(admin)
)
newtag.save()
self.changeReason = "Reassigned to user {} by {}".format(user, admin)
self.delete()
if activate:
doors = PermitType.objects.get(pk=settings.DOORS)
e, created = Entitlement.objects.get_or_create(
active=True, permit=doors, holder=user, issuer=admin
)
if created:
e.changeReason = "Auto created during reasign of what was an unknown tag to {} by {}".format(
user, admin
)
e.save()
return newtag
|
none
| 1
| 2.19141
| 2
|
|
pandas/tests/series/indexing/test_numeric.py
|
emadshihab/pandas
| 1
|
6625580
|
import numpy as np
import pytest
from pandas import DataFrame, Index, Series
import pandas._testing as tm
def test_delitem():
# GH 5542
# should delete the item inplace
s = Series(range(5))
del s[0]
expected = Series(range(1, 5), index=range(1, 5))
tm.assert_series_equal(s, expected)
del s[1]
expected = Series(range(2, 5), index=range(2, 5))
tm.assert_series_equal(s, expected)
# empty
s = Series(dtype=object)
with pytest.raises(KeyError, match=r"^0$"):
del s[0]
# only 1 left, del, add, del
s = Series(1)
del s[0]
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
s[0] = 1
tm.assert_series_equal(s, Series(1))
del s[0]
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
# Index(dtype=object)
s = Series(1, index=["a"])
del s["a"]
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="object")))
s["a"] = 1
tm.assert_series_equal(s, Series(1, index=["a"]))
del s["a"]
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="object")))
def test_slice_float64():
values = np.arange(10.0, 50.0, 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
tm.assert_series_equal(result, expected)
result = s.loc[start:end]
tm.assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_getitem_negative_out_of_bounds():
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
msg = "index -11 is out of bounds for axis 0 with size 10"
with pytest.raises(IndexError, match=msg):
s[-11]
with pytest.raises(IndexError, match=msg):
s[-11] = "foo"
def test_getitem_regression():
s = Series(range(5), index=list(range(5)))
result = s[list(range(5))]
tm.assert_series_equal(result, s)
def test_getitem_setitem_slice_bug():
s = Series(range(10), index=list(range(10)))
result = s[-12:]
tm.assert_series_equal(result, s)
result = s[-7:]
tm.assert_series_equal(result, s[3:])
result = s[:-12]
tm.assert_series_equal(result, s[:0])
s = Series(range(10), index=list(range(10)))
s[-12:] = 0
assert (s == 0).all()
s[:-12] = 5
assert (s == 0).all()
def test_getitem_setitem_slice_integers():
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
tm.assert_series_equal(result, expected)
s[:4] = 0
assert (s[:4] == 0).all()
assert not (s[4:] == 0).any()
def test_setitem_float_labels():
# note labels are floats
s = Series(["a", "b", "c"], index=[0, 0.5, 1])
tmp = s.copy()
s.loc[1] = "zoo"
tmp.iloc[2] = "zoo"
tm.assert_series_equal(s, tmp)
def test_slice_float_get_set(datetime_series):
msg = (
r"cannot do slice indexing on <class 'pandas\.core\.indexes"
r"\.datetimes\.DatetimeIndex'> with these indexers \[{key}\] "
r"of <class 'float'>"
)
with pytest.raises(TypeError, match=msg.format(key=r"4\.0")):
datetime_series[4.0:10.0]
with pytest.raises(TypeError, match=msg.format(key=r"4\.0")):
datetime_series[4.0:10.0] = 0
with pytest.raises(TypeError, match=msg.format(key=r"4\.5")):
datetime_series[4.5:10.0]
with pytest.raises(TypeError, match=msg.format(key=r"4\.5")):
datetime_series[4.5:10.0] = 0
def test_slice_floats2():
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
assert len(s.loc[12.0:]) == 8
assert len(s.loc[12.5:]) == 7
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
assert len(s.loc[12.0:]) == 8
assert len(s.loc[12.5:]) == 7
def test_int_indexing():
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
with pytest.raises(KeyError, match=r"^5$"):
s[5]
with pytest.raises(KeyError, match=r"^'c'$"):
s["c"]
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
with pytest.raises(KeyError, match=r"^5$"):
s[5]
with pytest.raises(KeyError, match=r"^'c'$"):
s["c"]
def test_getitem_int64(datetime_series):
idx = np.int64(5)
assert datetime_series[idx] == datetime_series[5]
|
import numpy as np
import pytest
from pandas import DataFrame, Index, Series
import pandas._testing as tm
def test_delitem():
# GH 5542
# should delete the item inplace
s = Series(range(5))
del s[0]
expected = Series(range(1, 5), index=range(1, 5))
tm.assert_series_equal(s, expected)
del s[1]
expected = Series(range(2, 5), index=range(2, 5))
tm.assert_series_equal(s, expected)
# empty
s = Series(dtype=object)
with pytest.raises(KeyError, match=r"^0$"):
del s[0]
# only 1 left, del, add, del
s = Series(1)
del s[0]
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
s[0] = 1
tm.assert_series_equal(s, Series(1))
del s[0]
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="int64")))
# Index(dtype=object)
s = Series(1, index=["a"])
del s["a"]
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="object")))
s["a"] = 1
tm.assert_series_equal(s, Series(1, index=["a"]))
del s["a"]
tm.assert_series_equal(s, Series(dtype="int64", index=Index([], dtype="object")))
def test_slice_float64():
values = np.arange(10.0, 50.0, 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
tm.assert_series_equal(result, expected)
result = s.loc[start:end]
tm.assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_getitem_negative_out_of_bounds():
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
msg = "index -11 is out of bounds for axis 0 with size 10"
with pytest.raises(IndexError, match=msg):
s[-11]
with pytest.raises(IndexError, match=msg):
s[-11] = "foo"
def test_getitem_regression():
s = Series(range(5), index=list(range(5)))
result = s[list(range(5))]
tm.assert_series_equal(result, s)
def test_getitem_setitem_slice_bug():
s = Series(range(10), index=list(range(10)))
result = s[-12:]
tm.assert_series_equal(result, s)
result = s[-7:]
tm.assert_series_equal(result, s[3:])
result = s[:-12]
tm.assert_series_equal(result, s[:0])
s = Series(range(10), index=list(range(10)))
s[-12:] = 0
assert (s == 0).all()
s[:-12] = 5
assert (s == 0).all()
def test_getitem_setitem_slice_integers():
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
tm.assert_series_equal(result, expected)
s[:4] = 0
assert (s[:4] == 0).all()
assert not (s[4:] == 0).any()
def test_setitem_float_labels():
# note labels are floats
s = Series(["a", "b", "c"], index=[0, 0.5, 1])
tmp = s.copy()
s.loc[1] = "zoo"
tmp.iloc[2] = "zoo"
tm.assert_series_equal(s, tmp)
def test_slice_float_get_set(datetime_series):
msg = (
r"cannot do slice indexing on <class 'pandas\.core\.indexes"
r"\.datetimes\.DatetimeIndex'> with these indexers \[{key}\] "
r"of <class 'float'>"
)
with pytest.raises(TypeError, match=msg.format(key=r"4\.0")):
datetime_series[4.0:10.0]
with pytest.raises(TypeError, match=msg.format(key=r"4\.0")):
datetime_series[4.0:10.0] = 0
with pytest.raises(TypeError, match=msg.format(key=r"4\.5")):
datetime_series[4.5:10.0]
with pytest.raises(TypeError, match=msg.format(key=r"4\.5")):
datetime_series[4.5:10.0] = 0
def test_slice_floats2():
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
assert len(s.loc[12.0:]) == 8
assert len(s.loc[12.5:]) == 7
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
assert len(s.loc[12.0:]) == 8
assert len(s.loc[12.5:]) == 7
def test_int_indexing():
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
with pytest.raises(KeyError, match=r"^5$"):
s[5]
with pytest.raises(KeyError, match=r"^'c'$"):
s["c"]
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
with pytest.raises(KeyError, match=r"^5$"):
s[5]
with pytest.raises(KeyError, match=r"^'c'$"):
s["c"]
def test_getitem_int64(datetime_series):
idx = np.int64(5)
assert datetime_series[idx] == datetime_series[5]
|
en
| 0.652311
|
# GH 5542 # should delete the item inplace # empty # only 1 left, del, add, del # Index(dtype=object) # note labels are floats # not monotonic
| 2.452988
| 2
|
official/nlp/configs/encoders.py
|
bamdada/UdacityProj10FinaltfModels
| 2
|
6625581
|
<reponame>bamdada/UdacityProj10FinaltfModels
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformer Encoders.
Includes configurations and instantiation methods.
"""
import dataclasses
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.modeling import networks
@dataclasses.dataclass
class TransformerEncoderConfig(base_config.Config):
"""BERT encoder configuration."""
vocab_size: int = 30522
hidden_size: int = 768
num_layers: int = 12
num_attention_heads: int = 12
hidden_activation: str = "gelu"
intermediate_size: int = 3072
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
max_position_embeddings: int = 512
type_vocab_size: int = 2
initializer_range: float = 0.02
def instantiate_encoder_from_cfg(
config: TransformerEncoderConfig) -> networks.TransformerEncoder:
"""Instantiate a Transformer encoder network from TransformerEncoderConfig."""
encoder_network = networks.TransformerEncoder(
vocab_size=config.vocab_size,
hidden_size=config.hidden_size,
num_layers=config.num_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
activation=tf_utils.get_activation(config.hidden_activation),
dropout_rate=config.dropout_rate,
attention_dropout_rate=config.attention_dropout_rate,
sequence_length=None,
max_sequence_length=config.max_position_embeddings,
type_vocab_size=config.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=config.initializer_range))
return encoder_network
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transformer Encoders.
Includes configurations and instantiation methods.
"""
import dataclasses
import tensorflow as tf
from official.modeling import tf_utils
from official.modeling.hyperparams import base_config
from official.nlp.modeling import networks
@dataclasses.dataclass
class TransformerEncoderConfig(base_config.Config):
"""BERT encoder configuration."""
vocab_size: int = 30522
hidden_size: int = 768
num_layers: int = 12
num_attention_heads: int = 12
hidden_activation: str = "gelu"
intermediate_size: int = 3072
dropout_rate: float = 0.1
attention_dropout_rate: float = 0.1
max_position_embeddings: int = 512
type_vocab_size: int = 2
initializer_range: float = 0.02
def instantiate_encoder_from_cfg(
config: TransformerEncoderConfig) -> networks.TransformerEncoder:
"""Instantiate a Transformer encoder network from TransformerEncoderConfig."""
encoder_network = networks.TransformerEncoder(
vocab_size=config.vocab_size,
hidden_size=config.hidden_size,
num_layers=config.num_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
activation=tf_utils.get_activation(config.hidden_activation),
dropout_rate=config.dropout_rate,
attention_dropout_rate=config.attention_dropout_rate,
sequence_length=None,
max_sequence_length=config.max_position_embeddings,
type_vocab_size=config.type_vocab_size,
initializer=tf.keras.initializers.TruncatedNormal(
stddev=config.initializer_range))
return encoder_network
|
en
| 0.787294
|
# Lint as: python3 # Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Transformer Encoders. Includes configurations and instantiation methods. BERT encoder configuration. Instantiate a Transformer encoder network from TransformerEncoderConfig.
| 2.010178
| 2
|
src/sentry/models/commitauthor.py
|
learninto/sentry
| 1
|
6625582
|
from __future__ import absolute_import, print_function
from django.db import models
from sentry.db.models import BoundedPositiveIntegerField, Model, sane_repr
class CommitAuthor(Model):
__core__ = False
organization_id = BoundedPositiveIntegerField(db_index=True)
name = models.CharField(max_length=128, null=True)
email = models.EmailField(max_length=75)
external_id = models.CharField(max_length=164, null=True)
class Meta:
app_label = "sentry"
db_table = "sentry_commitauthor"
unique_together = (("organization_id", "email"), ("organization_id", "external_id"))
__repr__ = sane_repr("organization_id", "email", "name")
def find_users(self):
from sentry.models import User
return User.objects.filter(
emails__email__iexact=self.email,
emails__is_verified=True,
sentry_orgmember_set__organization=self.organization_id,
is_active=True,
)
|
from __future__ import absolute_import, print_function
from django.db import models
from sentry.db.models import BoundedPositiveIntegerField, Model, sane_repr
class CommitAuthor(Model):
__core__ = False
organization_id = BoundedPositiveIntegerField(db_index=True)
name = models.CharField(max_length=128, null=True)
email = models.EmailField(max_length=75)
external_id = models.CharField(max_length=164, null=True)
class Meta:
app_label = "sentry"
db_table = "sentry_commitauthor"
unique_together = (("organization_id", "email"), ("organization_id", "external_id"))
__repr__ = sane_repr("organization_id", "email", "name")
def find_users(self):
from sentry.models import User
return User.objects.filter(
emails__email__iexact=self.email,
emails__is_verified=True,
sentry_orgmember_set__organization=self.organization_id,
is_active=True,
)
|
none
| 1
| 2.227211
| 2
|
|
pearwise/pearwise.py
|
omarchehab98/open.kattis.com-problems
| 1
|
6625583
|
<gh_stars>1-10
def parseBallot(line):
    """Parse one ballot line "<weight> <ranking>".

    Returns (weight, positions) where positions maps candidate index
    (letter 'A' -> 0, 'B' -> 1, ...) to its rank on this ballot
    (0 = most preferred).
    """
    weight, ranking = line.split()
    positions = {ord(ch) - ord('A'): rank for rank, ch in enumerate(ranking)}
    return int(weight), positions
# Read n candidates and m weighted ballots from stdin.
n, m = map(int, input().split())
ballots = list(map(parseBallot, [input() for _ in range(m)]))
# graph[v] holds every candidate w that v beats in a pairwise head-to-head.
graph = dict(zip(list(range(n)), [set() for _ in range(n)]))
for v in range(n):
    for w in range(v + 1, n):
        i_p, j_p = 0, 0
        # Tally total ballot weight preferring v over w, and vice versa.
        for p, ballot in ballots:
            if ballot[v] < ballot[w]:
                i_p += p
            else:
                j_p += p
        # Record the pairwise winner (plain statement instead of the original
        # conditional-expression-used-as-a-statement, which discards its value).
        if i_p > j_p:
            graph[v].add(w)
        else:
            graph[w].add(v)
# reversed_graph[v] holds every candidate that beats v (edge reversal of graph).
reversed_graph = dict(zip(list(range(n)), [set() for _ in range(n)]))
for v in graph:
    for w in graph:
        if v != w and v in graph[w]:
            reversed_graph[v].add(w)
def dfs(graph, v, visited):
    """Add to *visited* (in place) every node reachable from *v* in *graph*.

    Iterative depth-first traversal; *visited* may already contain nodes,
    which are then not expanded again. Returns None.
    """
    stack = [v]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        stack.extend(nbr for nbr in graph[node] if nbr not in visited)
# A candidate can win iff every candidate that beats them head-to-head is
# itself reachable (beaten transitively) from them in the beats-graph.
for i in range(n):
    beats = set()
    dfs(graph, i, beats)  # beats = all candidates i defeats, transitively (includes i)
    beaten_by = reversed_graph[i]
    print("{}: can{} win".format(chr(ord('A') + i), '' if beaten_by.issubset(beats) else "'t"))
|
def parseBallot(line):
    # Parse "<weight> <ranking>": returns (weight, {candidate_index: rank}).
    p, ballot = line.split()
    return int(p), dict(zip(map(lambda x: ord(x) - ord('A'), ballot), list(range(len(ballot)))))
# Read n candidates and m weighted ballots from stdin.
n, m = map(int, input().split())
ballots = list(map(parseBallot, [input() for _ in range(m)]))
# graph[v] = set of candidates that v beats in a pairwise head-to-head.
graph = dict(zip(list(range(n)), [set() for _ in range(n)]))
for v in range(n):
    for w in range(v + 1, n):
        i_p, j_p = 0, 0
        for p, ballot in ballots:
            if ballot[v] < ballot[w]:
                i_p += p
            else:
                j_p += p
        graph[v].add(w) if i_p > j_p else graph[w].add(v)
# reversed_graph[v] = set of candidates that beat v (reversed edges).
reversed_graph = dict(zip(list(range(n)), [set() for _ in range(n)]))
for v in graph.keys():
    for w in graph.keys():
        if v != w and v in graph[w]:
            reversed_graph[v].add(w)
def dfs(graph, v, visited):
    # Recursive DFS: collect every node reachable from v into visited.
    visited.add(v)
    for w in graph[v]:
        if w not in visited:
            dfs(graph, w, visited)
# A candidate can win iff everyone who beats them directly is beaten transitively.
for i in range(n):
    beats = set()
    dfs(graph, i, beats)
    beaten_by = reversed_graph[i]
    print("{}: can{} win".format(chr(ord('A') + i), '' if beaten_by.issubset(beats) else "'t"))
|
none
| 1
| 3.280928
| 3
|
|
storytelling/storytelling/generate_story_from_img.py
|
Joyce-yanqiongzhang/proj2_storytelling
| 0
|
6625584
|
from numpy.core.fromnumeric import argmax
from . import predict
# NOTE(review): hard-coded absolute paths to one developer's machine — these
# look like sample inputs for manual testing; confirm before relying on them.
img_paths = ['/home/serena/Desktop/proj2_storytelling/storytelling/statics/uploaded_imgs/1609853089.6942558user1.png',
             '/home/serena/Desktop/proj2_storytelling/storytelling/statics/uploaded_imgs/1609853095.5605984user2.png',
             '/home/serena/Desktop/proj2_storytelling/storytelling/statics/uploaded_imgs/1609853100.6583555user3.png']
# get attributes of the users' faces
class User_face:
    """Face attributes detected for one uploaded user photo."""

    def __init__(self, user_number, gender, age1, age2, attributes):
        """Store the predictor's output for one face.

        user_number: 1-based index of the user photo.
        gender, age1, age2, attributes: values produced by the face predictor
        (age1/age2 form an estimated age range).
        """
        # Per-character match scores; filled in later by the caller.
        self.score = []
        self.user_number = user_number
        self.gender = gender
        self.attributes = attributes
        self.age1 = age1
        self.age2 = age2
class Character:
    """A story character with the weights used to match user faces to it."""

    def __init__(self, index, name, gender, gender_weight, age1, age2, age_weight, attributes, attribute_weight):
        """Record the character's identity and its scoring weights.

        index/name identify the character; gender, the age range [age1, age2]
        and the attribute list each contribute their respective weight to a
        user's match score.
        """
        self.index = index
        self.name = name
        # Matching criteria with their score contributions.
        self.gender = gender
        self.gender_weight = gender_weight
        self.age1 = age1
        self.age2 = age2
        self.age_weight = age_weight
        self.attributes = attributes
        self.attribute_weight = attribute_weight
# Candidate characters: (index, name, gender, gender_weight, age1, age2,
# age_weight, attribute list, attribute_weight).
prince = Character(0, 'prince', 'Male', 50, 5, 25, 25, ['Attractive', 'Young', 'Oval_Face'], 15)
princess = Character(1, 'princess', 'Female', 50, 5, 25, 25, ['Attractive', 'Young', 'Oval_Face'], 15)
# NOTE(review): 'Eeyeglasses' looks like a typo for 'Eyeglasses'; if so it can
# never match a predicted attribute — confirm against predict.get_attributes output.
wizard = Character(2, 'wizard', 'Male', 10, 30, 80, 50, ['Chubby', 'Eeyeglasses', 'Bags_Under_Eyes', 'Mustache', 'Heavy_Makeup', 'Goatee'], 25)
# NOTE(review): 'Rabbit' is capitalized unlike the other (lowercase) names.
rabbit = Character(3, 'Rabbit', 'Female', 10, 0, 10, 50, ['Chubby', 'Young', 'Pale_Skin', 'Wearing_Lipstick'], 30)
character_set = [prince, princess, wizard, rabbit]
def calculate_score(user_face):
    """Score *user_face* against every character in the module-level character_set.

    Returns a list of integer scores, one per character, in character_set order.
    """
    def _score_against(face, character):
        # Sum the weights of every criterion the face satisfies.
        total = 0
        if face.gender == character.gender:
            total += character.gender_weight
        age_span = range(character.age1, character.age2 + 1)
        if face.age1 in age_span or face.age2 in age_span:
            total += character.age_weight
        total += sum(
            character.attribute_weight
            for attr in face.attributes
            if attr in character.attributes
        )
        return total

    return [_score_against(user_face, cha) for cha in character_set]
def character_mapping(img_paths):
    """Assign a distinct character to each user photo.

    Runs the face predictor on every image, scores each face against
    character_set, then greedily picks each user's best-scoring character
    that has not already been taken.

    Returns (user_characters, user_face_set): chosen character names (in
    img_paths order) and the corresponding User_face objects.
    """
    # Build one User_face per image from the predictor's output.
    user_face_set = []
    for i, img_path in enumerate(img_paths):
        gender, age1, age2, attributes = predict.get_attributes(img_path)
        user_face_set.append(User_face(i + 1, gender, int(age1), int(age2), attributes))
    # Score every face and remember the scores on the face object too.
    user_scores = []
    for user in user_face_set:
        scores = calculate_score(user)
        user.score = scores
        print("user " + str(user.user_number) + " got the scores for prince, princess, wizard, rabbit :", scores)
        user_scores.append(scores)
    print(user_scores)
    # Greedy assignment: best-scoring character, skipping ones already taken.
    user_characters = []
    for scores in user_scores:
        remaining = scores.copy()
        best = remaining.index(max(remaining))
        choice = character_set[best]
        while choice.name in user_characters:
            remaining[best] = -1
            best = remaining.index(max(remaining))
            choice = character_set[best]
        user_characters.append(choice.name)
    print(user_characters)
    return user_characters, user_face_set
|
from numpy.core.fromnumeric import argmax
from . import predict
# Sample uploaded-image paths (hard-coded to a developer machine — TODO confirm).
img_paths = ['/home/serena/Desktop/proj2_storytelling/storytelling/statics/uploaded_imgs/1609853089.6942558user1.png',
             '/home/serena/Desktop/proj2_storytelling/storytelling/statics/uploaded_imgs/1609853095.5605984user2.png',
             '/home/serena/Desktop/proj2_storytelling/storytelling/statics/uploaded_imgs/1609853100.6583555user3.png']
# get attributes of the users' faces
class User_face:
    """Face attributes detected for one uploaded user photo."""
    def __init__(self, user_number, gender, age1, age2, attributes):
        self.user_number = user_number
        self.gender = gender
        self.age1 = age1
        self.age2 = age2
        self.attributes = attributes
        # Per-character match scores; filled in later by character_mapping.
        self.score = []
class Character:
    """A story character with the weights used to match user faces to it."""
    def __init__(self, index, name, gender, gender_weight, age1, age2, age_weight, attributes, attribute_weight):
        self.index = index
        self.name = name
        self.gender = gender
        self.gender_weight = gender_weight
        self.age1 = age1
        self.age2 = age2
        self.age_weight = age_weight
        self.attributes = attributes
        self.attribute_weight = attribute_weight
# Candidate characters and their scoring weights.
# NOTE(review): 'Eeyeglasses' looks like a typo for 'Eyeglasses' — confirm.
prince = Character(0, 'prince', 'Male', 50, 5, 25, 25, ['Attractive', 'Young', 'Oval_Face'], 15)
princess = Character(1, 'princess', 'Female', 50, 5, 25, 25, ['Attractive', 'Young', 'Oval_Face'], 15)
wizard = Character(2, 'wizard', 'Male', 10, 30, 80, 50, ['Chubby', 'Eeyeglasses', 'Bags_Under_Eyes', 'Mustache', 'Heavy_Makeup', 'Goatee'], 25)
rabbit = Character(3, 'Rabbit', 'Female', 10, 0, 10, 50, ['Chubby', 'Young', 'Pale_Skin', 'Wearing_Lipstick'], 30)
character_set = [prince, princess, wizard, rabbit]
def calculate_score(user_face):
    """Return one integer score per character in character_set order."""
    def calculate_single(user_face, character):
        # Sum the weights of every criterion the face satisfies.
        single_score = 0
        if user_face.gender == character.gender:
            single_score += character.gender_weight
        if user_face.age1 in range(character.age1, character.age2+1) or user_face.age2 in range(character.age1, character.age2+1):
            single_score += character.age_weight
        for attr in user_face.attributes:
            if attr in character.attributes:
                single_score += character.attribute_weight
        return single_score
    user_score = []
    for cha in character_set:
        user_score.append(calculate_single(user_face, cha))
    return user_score
def character_mapping(img_paths):
    """Greedily assign a distinct character to each user photo; returns (names, faces)."""
    user_face_set = []
    for i, img_path in enumerate(img_paths):
        gender, age1, age2, attributes = predict.get_attributes(img_path)
        user_face_set.append(User_face(i+1, gender, int(age1), int(age2), attributes))
    user_scores = []
    for user in user_face_set:
        user_score = calculate_score(user)
        user.score = user_score
        print("user " + str(user.user_number) + " got the scores for prince, princess, wizard, rabbit :", user_score)
        user_scores.append(user_score)
    print(user_scores)
    user_characters = []
    for user_score in user_scores:
        # Pick the best-scoring character not already taken by an earlier user.
        selected_index = user_score.index(max(user_score))
        selected = character_set[selected_index]
        user_score_co = user_score.copy()
        while selected.name in user_characters:
            user_score_co[selected_index] = -1
            selected_index = user_score_co.index(max(user_score_co))
            selected = character_set[selected_index]
        user_characters.append(selected.name)
    print(user_characters)
    return user_characters, user_face_set
|
en
| 0.736942
|
# get attributes of the users' faces
| 2.540664
| 3
|
redhawk/common/parser.py
|
JordanMilne/Redhawk
| 0
|
6625585
|
""" Parser base class for the various langauge implementations.
Various language implementations have to
a) provide the GetTreeConverterClass() method for their language.
b) provide the Parse Method for their language.
"""
from . import tree_converter as T
class Parser:
    """Abstract base class for language-specific parsers.

    Subclasses must provide GetTreeConverterClass() and Parse(); the base
    class composes them to produce the language-agnostic AST (LAST).
    """
    def GetTreeConverterClass(self):
        """Return this language's TreeConverter subclass. Must be overridden."""
        raise NotImplementedError(
            "GetTreeConverterClass method not implemented in the Parser base class.")
    def Parse(self, filename):
        """Parse filename into the language-specific AST. Must be overridden."""
        raise NotImplementedError(
            "Parse is not implemented in the Parser base class.")
    def _Get_Converter(self, filename):
        """Instantiate this language's tree converter for *filename*."""
        converter_class = self.GetTreeConverterClass()
        # Sanity check: subclasses must return a TreeConverter subclass.
        assert issubclass(converter_class, T.TreeConverter)
        return converter_class(filename)
    def Convert(self, ast, filename=None):
        """Convert a language-specific AST to the LAST."""
        return self._Get_Converter(filename).Convert(ast)
    def GetLAST(self, filename):
        """Return the language-agnostic abstract syntax tree for filename."""
        # PEP 8: compare to None with `is not`, not `!=`.
        assert filename is not None
        converter = self._Get_Converter(filename)
        return converter.Convert(self.Parse(filename))
|
""" Parser base class for the various langauge implementations.
Various language implementations have to
a) provide the GetTreeConverterClass() method for their language.
b) provide the Parse Method for their language.
"""
from . import tree_converter as T
class Parser:
    """Abstract base class: language frontends supply a tree converter and a parser."""
    def GetTreeConverterClass(self):
        # Subclasses must return their language's TreeConverter subclass.
        raise NotImplementedError(
            "GetTreeConverterClass method not implemented in the Parser base class.")
    def Parse(self, filename):
        """ Parse filename. """
        raise NotImplementedError(
            "Parse is not implemented in the Parser base class.")
    def _Get_Converter(self, filename):
        # Instantiate the converter class after sanity-checking its type.
        converter_class = self.GetTreeConverterClass()
        assert(issubclass(converter_class, T.TreeConverter))
        converter = converter_class(filename)
        return converter
    def Convert(self, ast, filename=None):
        """ Convert language specific AST to the LAST """
        return self._Get_Converter(filename).Convert(ast)
    def GetLAST(self, filename):
        """ Return the language agnostic abstract syntax tree for filename."""
        assert(filename != None)
        converter = self._Get_Converter(filename)
        return converter.Convert(self.Parse(filename))
|
en
| 0.772234
|
Parser base class for the various langauge implementations. Various language implementations have to a) provide the GetTreeConverterClass() method for their language. b) provide the Parse Method for their language. Parse filename. Convert language specific AST to the LAST Return the language agnostic abstract syntax tree for filename.
| 3.41055
| 3
|
locale/pot/api/examples/_autosummary/pyvista-examples-downloads-download_bird_texture-1.py
|
tkoyama010/pyvista-doc-translations
| 4
|
6625586
|
<filename>locale/pot/api/examples/_autosummary/pyvista-examples-downloads-download_bird_texture-1.py
# Example: download the sample bird texture dataset via pyvista's examples module.
from pyvista import examples
dataset = examples.download_bird_texture() # doctest:+SKIP
|
<filename>locale/pot/api/examples/_autosummary/pyvista-examples-downloads-download_bird_texture-1.py
# Example: download the sample bird texture dataset via pyvista's examples module.
from pyvista import examples
dataset = examples.download_bird_texture() # doctest:+SKIP
|
en
| 0.408635
|
# doctest:+SKIP
| 1.388181
| 1
|
python/hongong/ch07/07_1.py
|
gangserver/py_test
| 0
|
6625587
|
from tensorflow import keras
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_validate
import numpy as np
from sklearn.model_selection import train_test_split
# Load Fashion-MNIST: 28x28 grayscale clothing images with integer labels 0-9.
(train_input, train_target), (test_input, test_target) =\
    keras.datasets.fashion_mnist.load_data()
print(train_input.shape, train_target.shape)
print(test_input.shape, test_target.shape)
# Preview the first ten training images.
fig, axs = plt.subplots(1, 10, figsize=(10, 10))
for i in range(10):
    axs[i].imshow(train_input[i], cmap='gray_r')
    axs[i].axis('off')
plt.show()
print([train_target[i] for i in range(10)])
print(np.unique(train_target, return_counts=True))
# Scale pixels to [0, 1] and flatten each image to a 784-long vector.
train_scaled = train_input / 255.0
train_scaled = train_scaled.reshape(-1, 28*28)
print(train_scaled.shape)
# Baseline: logistic regression trained with SGD, evaluated via cross-validation.
sc = SGDClassifier(loss='log', max_iter=5, random_state=42)
scores = cross_validate(sc, train_scaled, train_target, n_jobs=-1)
print(np.mean(scores['test_score']))
# Artificial neural network
train_scaled, val_scaled, train_target, val_target =\
    train_test_split(train_scaled, train_target, test_size=0.2, random_state=42)
print(train_scaled.shape, train_target.shape)
print(val_scaled.shape, val_target.shape)
# A single dense softmax layer: equivalent to multinomial logistic regression.
dense = keras.layers.Dense(10, activation='softmax', input_shape=(784,))
model = keras.Sequential(dense)
print(train_target[:10])
# sparse_categorical_crossentropy: targets are integer class labels, not one-hot.
model.compile(loss='sparse_categorical_crossentropy', metrics='accuracy')
model.fit(train_scaled, train_target, epochs=5)
model.evaluate(val_scaled, val_target)
|
from tensorflow import keras
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import cross_validate
import numpy as np
from sklearn.model_selection import train_test_split
# Load Fashion-MNIST: 28x28 grayscale clothing images with integer labels 0-9.
(train_input, train_target), (test_input, test_target) =\
    keras.datasets.fashion_mnist.load_data()
print(train_input.shape, train_target.shape)
print(test_input.shape, test_target.shape)
# Preview the first ten training images.
fig, axs = plt.subplots(1, 10, figsize=(10, 10))
for i in range(10):
    axs[i].imshow(train_input[i], cmap='gray_r')
    axs[i].axis('off')
plt.show()
print([train_target[i] for i in range(10)])
print(np.unique(train_target, return_counts=True))
# Scale pixels to [0, 1] and flatten each image to a 784-long vector.
train_scaled = train_input / 255.0
train_scaled = train_scaled.reshape(-1, 28*28)
print(train_scaled.shape)
# Baseline: logistic regression trained with SGD, evaluated via cross-validation.
sc = SGDClassifier(loss='log', max_iter=5, random_state=42)
scores = cross_validate(sc, train_scaled, train_target, n_jobs=-1)
print(np.mean(scores['test_score']))
# Artificial neural network
train_scaled, val_scaled, train_target, val_target =\
    train_test_split(train_scaled, train_target, test_size=0.2, random_state=42)
print(train_scaled.shape, train_target.shape)
print(val_scaled.shape, val_target.shape)
# A single dense softmax layer: equivalent to multinomial logistic regression.
dense = keras.layers.Dense(10, activation='softmax', input_shape=(784,))
model = keras.Sequential(dense)
print(train_target[:10])
# sparse_categorical_crossentropy: targets are integer class labels, not one-hot.
model.compile(loss='sparse_categorical_crossentropy', metrics='accuracy')
model.fit(train_scaled, train_target, epochs=5)
model.evaluate(val_scaled, val_target)
|
none
| 1
| 2.951869
| 3
|
|
sct_custom/spinalcordtoolbox/scripts/sct_straighten_spinalcord.py
|
nidebroux/lumbosacral_segmentation
| 1
|
6625588
|
<reponame>nidebroux/lumbosacral_segmentation
# !/usr/bin/env python
#
# This program takes as input an anatomic image and the centerline or segmentation of its spinal cord (that you can get
# using sct_get_centerline.py or sct_segmentation_propagation) and returns the anatomic image where the spinal
# cord was straightened.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 NeuroPoly, Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: <NAME>, <NAME>, <NAME>
# Modified: 2014-09-01
#
# License: see the LICENSE.TXT
# ======================================================================================================================
import sys
import os
from spinalcordtoolbox.straightening import SpinalCordStraightener
from spinalcordtoolbox.centerline.core import ParamCenterline
from spinalcordtoolbox.reports.qc import generate_qc
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, ActionCreateFolder, display_viewer_syntax
from spinalcordtoolbox.utils.sys import init_sct, printv, set_global_loglevel
def get_parser():
    """Build and return the command-line argument parser for this script."""
    parser = SCTArgumentParser(
        # NOTE(review): "the an image" typo in this user-facing help text.
        description="This program takes as input an anatomic image and the spinal cord centerline (or "
                    "segmentation), and returns the an image of a straightened spinal cord. Reference: "
                    "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, "
                    "<NAME>. Topologically-preserving straightening of spinal cord MRI. J Magn "
                    "Reson Imaging. 2017 Oct;46(4):1209-1219"
    )
    mandatory = parser.add_argument_group("MANDATORY ARGUMENTS")
    mandatory.add_argument(
        "-i",
        metavar=Metavar.file,
        help='Input image with curved spinal cord. Example: "t2.nii.gz"',
        required=True)
    mandatory.add_argument(
        "-s",
        metavar=Metavar.file,
        help='Spinal cord centerline (or segmentation) of the input image. To obtain the centerline, you can use '
             'sct_get_centerline. To obtain the segmentation you can use sct_propseg or sct_deepseg_sc. '
             'Example: centerline.nii.gz',
        required=True)
    optional = parser.add_argument_group("OPTIONAL ARGUMENTS")
    optional.add_argument(
        "-h",
        "--help",
        action="help",
        help="Show this help message and exit")
    optional.add_argument(
        "-dest",
        metavar=Metavar.file,
        help="Spinal cord centerline (or segmentation) of a destination image (which could be "
             "straight or curved). An algorithm scales the length of the input centerline to match that of the "
             "destination centerline. If using -ldisc-input and -ldisc-dest with this parameter, "
             "instead of linear scaling, the source centerline will be non-linearly matched so "
             "that the inter-vertebral discs of the input image will match that of the "
             "destination image. This feature is particularly useful for registering to a "
             "template while accounting for disc alignment.",
        required=False)
    optional.add_argument(
        "-ldisc-input",
        metavar=Metavar.file,
        help="Labels located at the posterior edge of the intervertebral discs, for the input "
             "image (-i). All disc covering the region of interest should be provided. Exmaple: if "
             "you are interested in levels C2 to C7, then you should provide disc labels 2,3,4,5,"
             "6,7). More details about label creation at "
             "http://sourceforge.net/p/spinalcordtoolbox/wiki/create_labels/. "  # TODO (Julien) update this link
             "This option must be used with the -ldisc-dest parameter.",
        required=False)
    optional.add_argument(
        "-ldisc-dest",
        metavar=Metavar.file,
        help="Labels located at the posterior edge of the intervertebral discs, for the destination file (-dest). "
             "The same comments as in -ldisc-input apply. This option must be used with the -ldisc-input parameter.",
        required=False)
    optional.add_argument(
        "-disable-straight2curved",
        action='store_true',
        help="Disable straight to curved transformation computation, in case you do not need the "
             "output warping field straight-->curve (faster).",
        required=False)
    optional.add_argument(
        "-disable-curved2straight",
        action='store_true',
        help="Disable curved to straight transformation computation, in case you do not need the "
             "output warping field curve-->straight (faster).",
        required=False)
    optional.add_argument(
        "-speed-factor",
        metavar=Metavar.float,
        type=float,
        help='Acceleration factor for the calculation of the straightening warping field.'
             ' This speed factor enables an intermediate resampling to a lower resolution, which '
             'decreases the computational time at the cost of lower accuracy.'
             ' A speed factor of 2 means that the input image will be downsampled by a factor 2 '
             'before calculating the straightening warping field. For example, a 1x1x1 mm^3 image '
             'will be downsampled to 2x2x2 mm3, providing a speed factor of approximately 8.'
             ' Note that accelerating the straightening process reduces the precision of the '
             'algorithm, and induces undesirable edges effects. Default=1 (no downsampling).',
        required=False,
        default=1)
    optional.add_argument(
        "-xy-size",
        metavar=Metavar.float,
        type=float,
        help='Size of the output FOV in the RL/AP plane, in mm. The resolution of the destination '
             'image is the same as that of the source image (-i). Default: 35.',
        required=False,
        default=35.0)
    optional.add_argument(
        "-o",
        metavar=Metavar.file,
        help='Straightened file. By default, the suffix "_straight" will be added to the input file name.',
        required=False,
        default='')
    optional.add_argument(
        "-ofolder",
        metavar=Metavar.folder,
        help="Output folder (all outputs will go there).",
        action=ActionCreateFolder,
        required=False,
        default='./')
    optional.add_argument(
        '-centerline-algo',
        help='Algorithm for centerline fitting. Default: nurbs.',
        choices=('bspline', 'linear', 'nurbs'),
        default='nurbs')
    optional.add_argument(
        '-centerline-smooth',
        metavar=Metavar.int,
        type=int,
        help='Degree of smoothing for centerline fitting. Only use with -centerline-algo {bspline, linear}. Default: 10',
        default=10)
    optional.add_argument(
        "-param",
        metavar=Metavar.list,
        help="R|Parameters for spinal cord straightening. Separate arguments with \",\".\n"
             " - precision: Float [1, inf) Precision factor of straightening, related to the number of slices. Increasing this parameter increases the precision along with increased computational time. Not taken into account with Hanning fitting method. Default=2\n"
             " - threshold_distance: Float [0, inf) Threshold at which voxels are not considered into displacement. Increase this threshold if the image is blackout around the spinal cord too much. Default=10\n"
             " - accuracy_results: {0, 1} Disable/Enable computation of accuracy results after straightening. Default=0\n"
             " - template_orientation: {0, 1} Disable/Enable orientation of the straight image to be the same as the template. Default=0",
        required=False)
    optional.add_argument(
        "-x",
        help="Final interpolation. Default: spline.",
        choices=("nn", "linear", "spline"),
        default="spline")
    optional.add_argument(
        '-qc',
        metavar=Metavar.str,
        help='The path where the quality control generated content will be saved',
        default=None)
    optional.add_argument(
        '-qc-dataset',
        metavar=Metavar.str,
        help='If provided, this string will be mentioned in the QC report as the dataset the '
             'process was run on',
        default=None)
    optional.add_argument(
        '-qc-subject',
        metavar=Metavar.str,
        help='If provided, this string will be mentioned in the QC report as the subject the '
             'process was run on',
        default=None)
    optional.add_argument(
        "-r",
        type=int,
        help="Remove temporary files.",
        required=False,
        choices=(0, 1),
        default=1)
    optional.add_argument(
        '-v',
        metavar=Metavar.int,
        type=int,
        choices=[0, 1, 2],
        default=1,
        # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
        help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
    return parser
# MAIN
# ==========================================================================================
def main(argv=None):
    """Straighten a spinal cord image according to command-line arguments.

    :param argv: argument list (None lets argparse fall back to sys.argv)
    :return: None
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)
    input_filename = arguments.i
    centerline_file = arguments.s
    sc_straight = SpinalCordStraightener(input_filename, centerline_file)
    if arguments.dest is not None:
        sc_straight.use_straight_reference = True
        sc_straight.centerline_reference_filename = str(arguments.dest)
    if arguments.ldisc_input is not None:
        if not sc_straight.use_straight_reference:
            printv('Warning: discs position are not taken into account if reference is not provided.')
        else:
            sc_straight.discs_input_filename = str(arguments.ldisc_input)
            # Disc-guided matching requires a finer straightening precision.
            sc_straight.precision = 4.0
    if arguments.ldisc_dest is not None:
        if not sc_straight.use_straight_reference:
            printv('Warning: discs position are not taken into account if reference is not provided.')
        else:
            sc_straight.discs_ref_filename = str(arguments.ldisc_dest)
            sc_straight.precision = 4.0
    # Handling optional arguments
    sc_straight.remove_temp_files = arguments.r
    sc_straight.interpolation_warp = arguments.x
    sc_straight.output_filename = arguments.o
    sc_straight.path_output = arguments.ofolder
    path_qc = arguments.qc
    sc_straight.verbose = verbose
    if arguments.disable_straight2curved:
        sc_straight.straight2curved = False
    if arguments.disable_curved2straight:
        sc_straight.curved2straight = False
    if arguments.speed_factor:
        sc_straight.speed_factor = arguments.speed_factor
    if arguments.xy_size:
        sc_straight.xy_size = arguments.xy_size
    sc_straight.param_centerline = ParamCenterline(
        algo_fitting=arguments.centerline_algo,
        smooth=arguments.centerline_smooth)
    if arguments.param is not None:
        params_user = arguments.param
        # update registration parameters
        # NOTE(review): each entry is expected as "name=value"; confirm that
        # SCTArgumentParser splits the comma-separated -param string into a
        # list, otherwise this loop iterates over single characters.
        for param in params_user:
            param_split = param.split('=')
            if param_split[0] == 'precision':
                sc_straight.precision = float(param_split[1])
            if param_split[0] == 'threshold_distance':
                sc_straight.threshold_distance = float(param_split[1])
            if param_split[0] == 'accuracy_results':
                sc_straight.accuracy_results = int(param_split[1])
            if param_split[0] == 'template_orientation':
                sc_straight.template_orientation = int(param_split[1])
    fname_straight = sc_straight.straighten()
    printv("\nFinished! Elapsed time: {} s".format(sc_straight.elapsed_time), verbose)
    # Generate QC report
    if path_qc is not None:
        path_qc = os.path.abspath(path_qc)
        qc_dataset = arguments.qc_dataset
        qc_subject = arguments.qc_subject
        # Bug fix: the original used __file__.strip('.py'), which strips the
        # character set {'.', 'p', 'y'} from both ends rather than removing
        # the extension; derive the process name with splitext instead.
        generate_qc(fname_straight, args=arguments, path_qc=path_qc,
                    dataset=qc_dataset, subject=qc_subject,
                    process=os.path.splitext(os.path.basename(__file__))[0])
    display_viewer_syntax([fname_straight], verbose=verbose)
if __name__ == "__main__":
init_sct()
main(sys.argv[1:])
|
# !/usr/bin/env python
#
# This program takes as input an anatomic image and the centerline or segmentation of its spinal cord (that you can get
# using sct_get_centerline.py or sct_segmentation_propagation) and returns the anatomic image where the spinal
# cord was straightened.
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2013 NeuroPoly, Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: <NAME>, <NAME>, <NAME>
# Modified: 2014-09-01
#
# License: see the LICENSE.TXT
# ======================================================================================================================
import sys
import os
from spinalcordtoolbox.straightening import SpinalCordStraightener
from spinalcordtoolbox.centerline.core import ParamCenterline
from spinalcordtoolbox.reports.qc import generate_qc
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, ActionCreateFolder, display_viewer_syntax
from spinalcordtoolbox.utils.sys import init_sct, printv, set_global_loglevel
def get_parser():
parser = SCTArgumentParser(
description="This program takes as input an anatomic image and the spinal cord centerline (or "
"segmentation), and returns the an image of a straightened spinal cord. Reference: "
"<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, "
"<NAME>. Topologically-preserving straightening of spinal cord MRI. J Magn "
"Reson Imaging. 2017 Oct;46(4):1209-1219"
)
mandatory = parser.add_argument_group("MANDATORY ARGUMENTS")
mandatory.add_argument(
"-i",
metavar=Metavar.file,
help='Input image with curved spinal cord. Example: "t2.nii.gz"',
required=True)
mandatory.add_argument(
"-s",
metavar=Metavar.file,
help='Spinal cord centerline (or segmentation) of the input image. To obtain the centerline, you can use '
'sct_get_centerline. To obtain the segmentation you can use sct_propseg or sct_deepseg_sc. '
'Example: centerline.nii.gz',
required=True)
optional = parser.add_argument_group("OPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit")
optional.add_argument(
"-dest",
metavar=Metavar.file,
help="Spinal cord centerline (or segmentation) of a destination image (which could be "
"straight or curved). An algorithm scales the length of the input centerline to match that of the "
"destination centerline. If using -ldisc-input and -ldisc-dest with this parameter, "
"instead of linear scaling, the source centerline will be non-linearly matched so "
"that the inter-vertebral discs of the input image will match that of the "
"destination image. This feature is particularly useful for registering to a "
"template while accounting for disc alignment.",
required=False)
optional.add_argument(
"-ldisc-input",
metavar=Metavar.file,
help="Labels located at the posterior edge of the intervertebral discs, for the input "
"image (-i). All disc covering the region of interest should be provided. Exmaple: if "
"you are interested in levels C2 to C7, then you should provide disc labels 2,3,4,5,"
"6,7). More details about label creation at "
"http://sourceforge.net/p/spinalcordtoolbox/wiki/create_labels/. " # TODO (Julien) update this link
"This option must be used with the -ldisc-dest parameter.",
required=False)
optional.add_argument(
"-ldisc-dest",
metavar=Metavar.file,
help="Labels located at the posterior edge of the intervertebral discs, for the destination file (-dest). "
"The same comments as in -ldisc-input apply. This option must be used with the -ldisc-input parameter.",
required=False)
optional.add_argument(
"-disable-straight2curved",
action='store_true',
help="Disable straight to curved transformation computation, in case you do not need the "
"output warping field straight-->curve (faster).",
required=False)
optional.add_argument(
"-disable-curved2straight",
action='store_true',
help="Disable curved to straight transformation computation, in case you do not need the "
"output warping field curve-->straight (faster).",
required=False)
optional.add_argument(
"-speed-factor",
metavar=Metavar.float,
type=float,
help='Acceleration factor for the calculation of the straightening warping field.'
' This speed factor enables an intermediate resampling to a lower resolution, which '
'decreases the computational time at the cost of lower accuracy.'
' A speed factor of 2 means that the input image will be downsampled by a factor 2 '
'before calculating the straightening warping field. For example, a 1x1x1 mm^3 image '
'will be downsampled to 2x2x2 mm3, providing a speed factor of approximately 8.'
' Note that accelerating the straightening process reduces the precision of the '
'algorithm, and induces undesirable edges effects. Default=1 (no downsampling).',
required=False,
default=1)
optional.add_argument(
"-xy-size",
metavar=Metavar.float,
type=float,
help='Size of the output FOV in the RL/AP plane, in mm. The resolution of the destination '
'image is the same as that of the source image (-i). Default: 35.',
required=False,
default=35.0)
optional.add_argument(
"-o",
metavar=Metavar.file,
help='Straightened file. By default, the suffix "_straight" will be added to the input file name.',
required=False,
default='')
optional.add_argument(
"-ofolder",
metavar=Metavar.folder,
help="Output folder (all outputs will go there).",
action=ActionCreateFolder,
required=False,
default='./')
optional.add_argument(
'-centerline-algo',
help='Algorithm for centerline fitting. Default: nurbs.',
choices=('bspline', 'linear', 'nurbs'),
default='nurbs')
optional.add_argument(
'-centerline-smooth',
metavar=Metavar.int,
type=int,
help='Degree of smoothing for centerline fitting. Only use with -centerline-algo {bspline, linear}. Default: 10',
default=10)
optional.add_argument(
"-param",
metavar=Metavar.list,
help="R|Parameters for spinal cord straightening. Separate arguments with \",\".\n"
" - precision: Float [1, inf) Precision factor of straightening, related to the number of slices. Increasing this parameter increases the precision along with increased computational time. Not taken into account with Hanning fitting method. Default=2\n"
" - threshold_distance: Float [0, inf) Threshold at which voxels are not considered into displacement. Increase this threshold if the image is blackout around the spinal cord too much. Default=10\n"
" - accuracy_results: {0, 1} Disable/Enable computation of accuracy results after straightening. Default=0\n"
" - template_orientation: {0, 1} Disable/Enable orientation of the straight image to be the same as the template. Default=0",
required=False)
optional.add_argument(
"-x",
help="Final interpolation. Default: spline.",
choices=("nn", "linear", "spline"),
default="spline")
optional.add_argument(
'-qc',
metavar=Metavar.str,
help='The path where the quality control generated content will be saved',
default=None)
optional.add_argument(
'-qc-dataset',
metavar=Metavar.str,
help='If provided, this string will be mentioned in the QC report as the dataset the '
'process was run on',
default=None)
optional.add_argument(
'-qc-subject',
metavar=Metavar.str,
help='If provided, this string will be mentioned in the QC report as the subject the '
'process was run on',
default=None)
optional.add_argument(
"-r",
type=int,
help="Remove temporary files.",
required=False,
choices=(0, 1),
default=1)
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
return parser
# MAIN
# ==========================================================================================
def main(argv=None):
    """
    Command-line entry point: parse arguments, configure and run the spinal
    cord straightening, then optionally generate a QC report.

    :param argv: Command-line arguments (``sys.argv[1:]`` is used when None).
    :return: None
    """
    parser = get_parser()
    arguments = parser.parse_args(argv)
    verbose = arguments.v
    set_global_loglevel(verbose=verbose)

    input_filename = arguments.i
    centerline_file = arguments.s

    sc_straight = SpinalCordStraightener(input_filename, centerline_file)

    if arguments.dest is not None:
        sc_straight.use_straight_reference = True
        sc_straight.centerline_reference_filename = str(arguments.dest)

    if arguments.ldisc_input is not None:
        if not sc_straight.use_straight_reference:
            printv('Warning: discs position are not taken into account if reference is not provided.')
        else:
            sc_straight.discs_input_filename = str(arguments.ldisc_input)
            # Disc labels require a finer straightening precision than the default.
            sc_straight.precision = 4.0

    if arguments.ldisc_dest is not None:
        if not sc_straight.use_straight_reference:
            printv('Warning: discs position are not taken into account if reference is not provided.')
        else:
            sc_straight.discs_ref_filename = str(arguments.ldisc_dest)
            sc_straight.precision = 4.0

    # Handling optional arguments
    sc_straight.remove_temp_files = arguments.r
    sc_straight.interpolation_warp = arguments.x
    sc_straight.output_filename = arguments.o
    sc_straight.path_output = arguments.ofolder
    path_qc = arguments.qc
    sc_straight.verbose = verbose

    if arguments.disable_straight2curved:
        sc_straight.straight2curved = False
    if arguments.disable_curved2straight:
        sc_straight.curved2straight = False

    if arguments.speed_factor:
        sc_straight.speed_factor = arguments.speed_factor
    if arguments.xy_size:
        sc_straight.xy_size = arguments.xy_size

    sc_straight.param_centerline = ParamCenterline(
        algo_fitting=arguments.centerline_algo,
        smooth=arguments.centerline_smooth)

    if arguments.param is not None:
        params_user = arguments.param
        # Update straightening parameters from the user-supplied key=value list.
        for param in params_user:
            param_split = param.split('=')
            if param_split[0] == 'precision':
                sc_straight.precision = float(param_split[1])
            if param_split[0] == 'threshold_distance':
                sc_straight.threshold_distance = float(param_split[1])
            if param_split[0] == 'accuracy_results':
                sc_straight.accuracy_results = int(param_split[1])
            if param_split[0] == 'template_orientation':
                sc_straight.template_orientation = int(param_split[1])

    fname_straight = sc_straight.straighten()

    printv("\nFinished! Elapsed time: {} s".format(sc_straight.elapsed_time), verbose)

    # Generate QC report
    if path_qc is not None:
        path_qc = os.path.abspath(path_qc)
        qc_dataset = arguments.qc_dataset
        qc_subject = arguments.qc_subject
        # BUGFIX: the previous code used __file__.strip('.py'), which strips
        # any of the characters '.', 'p', 'y' from BOTH ends of the path, not
        # the ".py" suffix.  Use splitext on the basename instead.
        generate_qc(fname_straight, args=arguments, path_qc=path_qc,
                    dataset=qc_dataset, subject=qc_subject,
                    process=os.path.splitext(os.path.basename(__file__))[0])

    display_viewer_syntax([fname_straight], verbose=verbose)
# Script entry point: initialize the SCT environment, then run the CLI.
if __name__ == "__main__":
    init_sct()
    main(sys.argv[1:])
|
en
| 0.57699
|
# !/usr/bin/env python # # This program takes as input an anatomic image and the centerline or segmentation of its spinal cord (that you can get # using sct_get_centerline.py or sct_segmentation_propagation) and returns the anatomic image where the spinal # cord was straightened. # # --------------------------------------------------------------------------------------- # Copyright (c) 2013 NeuroPoly, Polytechnique Montreal <www.neuro.polymtl.ca> # Authors: <NAME>, <NAME>, <NAME> # Modified: 2014-09-01 # # License: see the LICENSE.TXT # ====================================================================================================================== # TODO (Julien) update this link # Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API # MAIN # ========================================================================================== Main function :param argv: :return: # Handling optional arguments # if arguments.cpu_nb is not None: # sc_straight.cpu_number = arguments.cpu-nb) # update registration parameters # Generate QC report
| 2.884613
| 3
|
calicoctl/calico_ctl/node.py
|
EdSchouten/calico-containers
| 0
|
6625589
|
<gh_stars>0
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
import sys
import docker
import docker.errors
import docker.utils
from netaddr import IPAddress, AddrFormatError
from prettytable import PrettyTable
from pycalico.datastore import (ETCD_AUTHORITY_ENV, ETCD_AUTHORITY_DEFAULT,
ETCD_KEY_FILE_ENV, ETCD_CERT_FILE_ENV,
ETCD_CA_CERT_FILE_ENV, ETCD_SCHEME_ENV,
ETCD_SCHEME_DEFAULT, ETCD_ENDPOINTS_ENV)
from pycalico.datastore_datatypes import BGPPeer
from pycalico.datastore_errors import DataStoreError
from pycalico.netns import remove_veth
from pycalico.util import validate_asn, validate_ip
from subprocess32 import call
from checksystem import check_system
from connectors import client, docker_client
from utils import (REQUIRED_MODULES, running_in_container, enforce_root,
get_container_ipv_from_arguments, hostname, print_paragraph,
convert_asn_to_asplain,
ipv6_enabled)
__doc__ = """
Usage:
calicoctl node [--ip=<IP>] [--ip6=<IP6>] [--node-image=<DOCKER_IMAGE_NAME>]
[--runtime=<RUNTIME>] [--as=<AS_NUM>] [--log-dir=<LOG_DIR>]
[--detach=<DETACH>] [--no-pull]
[(--libnetwork [--libnetwork-image=<LIBNETWORK_IMAGE_NAME>])]
[--backend=(bird | gobgp | none)]
calicoctl node stop [--force]
calicoctl node remove [--hostname=<HOSTNAME>] [--remove-endpoints]
calicoctl node show
calicoctl node bgp peer add <PEER_IP> as <AS_NUM>
calicoctl node bgp peer remove <PEER_IP>
calicoctl node bgp peer show [--ipv4 | --ipv6]
Description:
Configure the Calico node containers as well as default BGP information
for this node.
Options:
--as=<AS_NUM> The default AS number for this node.
--detach=<DETACH> Set "true" to run Calico service as detached,
"false" to run in the foreground. When using
libnetwork, this may not be set to "false".
When using --runtime=rkt, --detach is always false.
[default: true]
--force Forcefully stop the Calico node
--hostname=<HOSTNAME> The hostname from which to remove the Calico node.
--ip=<IP> The local management address to use.
--ip6=<IP6> The local IPv6 management address to use.
--ipv4 Show IPv4 information only.
--ipv6 Show IPv6 information only.
--libnetwork (Deprecated) Use the libnetwork plugin.
--libnetwork-image=<LIBNETWORK_IMAGE_NAME> (Deprecated) This flag will be ignored.
[default: calico/node-libnetwork:latest]
--log-dir=<LOG_DIR> The directory for logs [default: /var/log/calico]
--no-pull Prevent from pulling the Calico node Docker images.
--node-image=<DOCKER_IMAGE_NAME> Docker image to use for Calico's per-node
container. [default: calico/node:latest]
--remove-endpoints Remove the endpoint data when deleting the node
from the Calico network.
--runtime=<RUNTIME> Specify how Calico services should be
launched. When set to "docker" or "rkt", services
will be launched via the calico-node container,
whereas a value of "none" will not launch them at
all. [default: docker]
--backend=<BACKEND> Specify which networking backend to use.
Choices are "bird", "gobgp" or "none".
When set to "none", Calico node run in policy
only mode.
"""
# Deprecated environment variable toggling Calico networking; superseded by
# the --backend option (see get_networking_backend()).
CALICO_NETWORKING_ENV = "CALICO_NETWORKING"
CALICO_NETWORKING_DEFAULT = "true"
# When set, the calico/node container does not create default IP pools.
NO_DEFAULT_POOLS_ENV = "NO_DEFAULT_POOLS"
# Paths inside the calico/node container where etcd TLS material is mounted.
ETCD_KEY_NODE_FILE = "/etc/calico/certs/key.pem"
ETCD_CERT_NODE_FILE = "/etc/calico/certs/cert.crt"
ETCD_CA_CERT_NODE_FILE = "/etc/calico/certs/ca_cert.crt"
def validate_arguments(arguments):
    """
    Validate argument values:
        <IP>
        <IP6>
        <PEER_IP>
        <AS_NUM>
        <DETACH>

    Arguments not validated:
        <DOCKER_IMAGE_NAME>
        <LOG_DIR>

    Prints a message and exits with status 1 on any invalid value.

    :param arguments: Docopt processed arguments
    """
    # Validate IPs.  Unset (None) or empty addresses are acceptable; anything
    # else must parse as an address of the expected IP version.
    # (Fixed: the previous code compared against "" with "is", which only
    # worked by virtue of CPython string interning; "==" is the correct test.)
    ip_ok = arguments.get("--ip") is None or \
            arguments.get("--ip") == "" or \
            validate_ip(arguments.get("--ip"), 4)
    ip6_ok = arguments.get("--ip6") is None or \
             arguments.get("--ip6") == "" or \
             validate_ip(arguments.get("--ip6"), 6)
    container_ip_ok = arguments.get("<IP>") is None or \
                      validate_ip(arguments["<IP>"], 4) or \
                      validate_ip(arguments["<IP>"], 6)
    peer_ip_ok = arguments.get("<PEER_IP>") is None or \
                 validate_ip(arguments["<PEER_IP>"], 4) or \
                 validate_ip(arguments["<PEER_IP>"], 6)
    runtime_ok = arguments.get("--runtime") in [None, "none", "docker", "rkt"]

    # The AS number may come from either the positional or the option form.
    asnum_ok = True
    asnum = arguments.get("<AS_NUM>") or arguments.get("--as")
    if asnum:
        asnum_ok = validate_asn(asnum)

    detach_ok = True
    if arguments.get("<DETACH>") or arguments.get("--detach"):
        detach_ok = arguments.get("--detach") in ["true", "false"]

    # libnetwork requires the node to run detached.
    detach_libnetwork_ok = (arguments.get("--detach") == "true" or
                            not arguments.get("--libnetwork"))

    # Print error message
    if not ip_ok:
        print("Invalid IPv4 address specified with --ip argument.")
    if not ip6_ok:
        print("Invalid IPv6 address specified with --ip6 argument.")
    if not container_ip_ok or not peer_ip_ok:
        print("Invalid IP address specified.")
    if not asnum_ok:
        print("Invalid AS Number specified.")
    if not detach_ok:
        print("Valid values for --detach are 'true' and 'false'")
    if not detach_libnetwork_ok:
        print("The only valid value for --detach is 'true' when using libnetwork")
    if not runtime_ok:
        print("Runtime must be 'docker', 'rkt' or 'none'.")

    # Exit if not valid argument
    if not (ip_ok and ip6_ok and container_ip_ok and peer_ip_ok and asnum_ok
            and detach_ok and detach_libnetwork_ok and runtime_ok):
        sys.exit(1)
def get_networking_backend(docopt_backend):
    """
    Weighs both the deprecated CALICO_NETWORKING_ENV and the new --backend from docopt to determine
    which backend should be used. Ideally, we could use the docopt [default: bird], but until we've
    finished deprecating CALICO_NETWORKING_ENV, we need to be able to consider every combination of the
    two variables.

    :param docopt_backend: The docopt value for --backend. Should be "bird", "gobgp", "none", or None (if
    they are using CALICO_NETWORKING_ENV instead).
    :return: The backend to use: "bird", "gobgp" or "none".
    """
    # If backend was specified via docopt, use it, as command line args take
    # precedence over ENV vars.  (Fixed: use "is not None" rather than the
    # "!= None" comparison.)
    if docopt_backend is not None:
        return docopt_backend

    # Otherwise, check if they are using the old binary flag: CALICO_NETWORKING_ENV.
    calico_networking = os.getenv(CALICO_NETWORKING_ENV)
    if not calico_networking:
        # Neither environment variable nor command line passed, use default: bird.
        return "bird"

    sys.stderr.write("WARNING: %s will be deprecated: use '--backend' instead\n"
                     % (CALICO_NETWORKING_ENV))
    if calico_networking == "false":
        # Environment variable passed to disable Bird. use: none
        return "none"
    # Environment variable passed as assumed default. use: bird.
    return "bird"
def node(arguments):
    """
    Main dispatcher for node commands. Calls the corresponding helper function.

    :param arguments: A dictionary of arguments already processed through
    this file's docstring with docopt
    :return: None
    """
    validate_arguments(arguments)

    backend = get_networking_backend(arguments.get('--backend'))
    # The AS number may come from either the positional or the option form.
    as_num = convert_asn_to_asplain(arguments.get("<AS_NUM>") or arguments.get("--as"))

    if arguments.get("bgp"):
        if arguments.get("peer"):
            ip_version = get_container_ipv_from_arguments(arguments)
            if arguments.get("add"):
                node_bgppeer_add(arguments.get("<PEER_IP>"), ip_version,
                                 as_num)
            elif arguments.get("remove"):
                node_bgppeer_remove(arguments.get("<PEER_IP>"), ip_version)
            elif arguments.get("show"):
                if not ip_version:
                    # No specific version requested: show both families.
                    node_bgppeer_show(4)
                    node_bgppeer_show(6)
                else:
                    node_bgppeer_show(ip_version)
    elif arguments.get("stop"):
        node_stop(arguments.get("--force"))
    elif arguments.get("remove"):
        node_remove(arguments.get("--remove-endpoints"),
                    arguments.get("--hostname"))
    elif arguments.get("show"):
        node_show()
    else:
        # validate_arguments() has already checked this value.
        assert arguments.get("--detach") in ["true", "false"]
        detach = arguments.get("--detach") == "true"
        # --libnetwork is a simple presence flag (replaces the previous
        # "False if not x else True" construction).
        libnetwork_enabled = bool(arguments.get("--libnetwork"))
        node_start(ip=arguments.get("--ip"),
                   node_image=arguments.get('--node-image'),
                   runtime=arguments.get("--runtime"),
                   log_dir=arguments.get("--log-dir"),
                   ip6=arguments.get("--ip6"),
                   as_num=as_num,
                   detach=detach,
                   libnetwork_enabled=libnetwork_enabled,
                   no_pull=arguments.get("--no-pull"),
                   backend=backend)
def node_start(node_image, runtime, log_dir, ip, ip6, as_num, detach,
               libnetwork_enabled, no_pull, backend):
    """
    Create the calico-node container and establish Calico networking on this
    host.

    :param node_image: The calico-node image to use.
    :param runtime: "docker", "rkt" or "none" - how the node services are
    launched.
    :param log_dir: Host directory to hold logs; mounted into the container.
    :param ip: The IPv4 address of the host.
    :param ip6: The IPv6 address of the host (or None if not configured)
    :param as_num: The BGP AS Number to use for this node. If not specified
    the global default value will be used.
    :param detach: True to run in Docker's "detached" mode, False to run
    attached.
    :param libnetwork_enabled: True to run libnetwork plugin inside calico-node.
    :param no_pull: Boolean, True to prevent function from pulling the Calico
    node Docker images.
    :param backend: String, backend choice. Should be "bird", "none", or "gobgp".
    :return: None.
    """
    # The command has to be run as root to access iptables and services
    enforce_root()

    # Normally, Felix will load the modules it needs, but when running inside a
    # container it might not be able to do so. Ensure the required modules are
    # loaded each time the node starts.
    # We only make a best effort attempt because the command may fail if the
    # modules are built in.
    # We'll warn during the check_system() if the modules are unavailable.
    if not running_in_container():
        try:
            call(["modprobe", "-a"] + REQUIRED_MODULES)
        except OSError:
            # modprobe binary unavailable; check_system() reports this later.
            pass

    _setup_ip_forwarding()
    _set_nf_conntrack_max()

    # Print warnings for any known system issues before continuing
    if runtime == 'docker' and not running_in_container():
        using_docker = True
    else:
        using_docker = False

    (_, docker_ok, etcd_ok) = \
        check_system(quit_if_error=False, libnetwork=libnetwork_enabled,
                     check_docker=using_docker,
                     check_modules=not running_in_container())

    # etcd connectivity is mandatory; Docker only when it is the runtime.
    if not etcd_ok or (using_docker and not docker_ok):
        sys.exit(1)

    # Ensure log directory and /var/run/calico exist so that they can be
    # mounted into the containers.
    try:
        os.makedirs(log_dir)
    except OSError:
        # Directory already exists (or cannot be created - surfaces on mount).
        pass
    try:
        os.makedirs("/var/run/calico")
    except OSError:
        pass

    # The format of the authority and endpoints strings have already been
    # validated.
    etcd_authority = os.getenv(ETCD_AUTHORITY_ENV, ETCD_AUTHORITY_DEFAULT)
    etcd_endpoints = os.getenv(ETCD_ENDPOINTS_ENV)

    # Get etcd SSL environment variables if they exist
    etcd_scheme = os.getenv(ETCD_SCHEME_ENV, ETCD_SCHEME_DEFAULT)
    etcd_key_file = os.getenv(ETCD_KEY_FILE_ENV)
    etcd_cert_file = os.getenv(ETCD_CERT_FILE_ENV)
    etcd_ca_cert_file = os.getenv(ETCD_CA_CERT_FILE_ENV)

    etcd_volumes = []
    etcd_binds = {}
    etcd_envs = ["ETCD_AUTHORITY=%s" % etcd_authority,
                 "ETCD_SCHEME=%s" % etcd_scheme]
    if etcd_endpoints:
        etcd_envs.append("ETCD_ENDPOINTS=%s" % etcd_endpoints)

    # Mount the CA cert into the container and point the node at it.
    if etcd_ca_cert_file:
        etcd_volumes.append(ETCD_CA_CERT_NODE_FILE)
        etcd_binds[etcd_ca_cert_file] = {"bind": ETCD_CA_CERT_NODE_FILE,
                                         "ro": True}
        etcd_envs.append("ETCD_CA_CERT_FILE=%s" % ETCD_CA_CERT_NODE_FILE)

    # Client key and certificate are only used when supplied together.
    if etcd_key_file and etcd_cert_file:
        etcd_volumes.append(ETCD_KEY_NODE_FILE)
        etcd_binds[etcd_key_file] = {"bind": ETCD_KEY_NODE_FILE,
                                     "ro": True}
        etcd_envs.append("ETCD_KEY_FILE=%s" % ETCD_KEY_NODE_FILE)

        etcd_volumes.append(ETCD_CERT_NODE_FILE)
        etcd_binds[etcd_cert_file] = {"bind": ETCD_CERT_NODE_FILE,
                                     "ro": True}
        etcd_envs.append("ETCD_CERT_FILE=%s" % ETCD_CERT_NODE_FILE)

    if runtime == 'docker':
        _start_node_container_docker(ip, ip6, as_num, log_dir, node_image, detach,
                                     etcd_envs, etcd_volumes, etcd_binds, libnetwork_enabled, no_pull, backend)
    if runtime == 'rkt':
        _start_node_container_rkt(ip, ip6, as_num, node_image, etcd_envs,
                                  etcd_volumes, etcd_binds, backend)
def _start_node_container_docker(ip, ip6, as_num, log_dir, node_image, detach, etcd_envs,
                                 etcd_volumes, etcd_binds, libnetwork_enabled, no_pull, backend):
    """
    Start the main Calico node container.

    :param ip: The IPv4 address of the host.
    :param ip6: The IPv6 address of the host (or None if not configured)
    :param as_num: The AS number for the host
    :param log_dir: The log directory to use.
    :param node_image: The calico-node image to use.
    :param detach: True to run in Docker's "detached" mode, False to run
    attached.
    :param etcd_envs: Etcd environment variables to pass into the container
    :param etcd_volumes: List of mount_paths for etcd files to mount on the
    container
    :param etcd_binds: Dictionary of host file and mount file pairs for etcd
    files to mount on the container
    :param libnetwork_enabled: True to run libnetwork plugin inside calico-node.
    :param no_pull: Boolean, True to prevent function from pulling the Calico
    node Docker image.
    :param backend: String, backend choice. Should be "bird", "none", or "gobgp".
    :return: None.
    """
    no_default_pools = os.getenv(NO_DEFAULT_POOLS_ENV)

    if not no_pull:
        # Make sure the required image is pulled before removing the old one.
        # This minimizes downtime during upgrade.
        _find_or_pull_node_image(node_image)

    # Remove any pre-existing calico-node container; a 404 just means there
    # was none to remove.
    try:
        docker_client.remove_container("calico-node", force=True)
    except docker.errors.APIError as err:
        if err.response.status_code != 404:
            raise

    # This is to convert libnetwork_enabled (bool) into a string to pass it as an ENV var value
    if libnetwork_enabled:
        libnetwork_flag_str = "true"
    else:
        libnetwork_flag_str = "false"

    environment = [
        "HOSTNAME=%s" % hostname,
        "IP=%s" % (ip or ""),
        "IP6=%s" % (ip6 or ""),
        "CALICO_NETWORKING_BACKEND=%s" % backend,
        "AS=%s" % (as_num or ""),
        "NO_DEFAULT_POOLS=%s" % (no_default_pools or ""),
        "CALICO_LIBNETWORK_ENABLED=%s" % libnetwork_flag_str
    ] + etcd_envs

    binds = {
        log_dir:
            {
                "bind": "/var/log/calico",
                "ro": False
            },
        "/var/run/calico":
            {
                "bind": "/var/run/calico",
                "ro": False
            },
        "/lib/modules":
            {
                "bind": "/lib/modules",
                "ro": False
            }
    }

    # Additional rw bind (/run/docker/plugins) necessary when libnetwork is enabled
    if libnetwork_enabled:
        binds["/run/docker/plugins"] = {
            "bind": "/run/docker/plugins",
            "ro": False
        }

    binds.update(etcd_binds)

    host_config = docker_client.create_host_config(
        privileged=True,
        restart_policy={"Name": "always"},
        network_mode="host",
        binds=binds)

    volumes = ["/var/log/calico", "/var/run/calico", "/lib/modules"] + etcd_volumes
    # Add /run/docker/plugins to the list of volumes to be mounted when libnetwork is enabled
    if libnetwork_enabled:
        volumes.append("/run/docker/plugins")

    container = docker_client.create_container(
        node_image,
        name="calico-node",
        detach=True,
        environment=environment,
        host_config=host_config,
        volumes=volumes)
    cid = container["Id"]

    # Build and echo the equivalent "docker run" command for the user's
    # reference.
    env_string = ""
    for an_env in environment:
        env_string += " -e " + an_env

    vol_string = ""
    for a_vol in binds:
        vol_string += " -v %s:%s" % (a_vol, binds[a_vol]["bind"])

    detach_string = " -d" if detach else ""

    print "Running Docker container with the following command:\n"
    print "docker run%s --restart=always --net=host --privileged --name=calico-node%s%s %s\n" % \
          (detach_string, env_string, vol_string, node_image)
    docker_client.start(container)
    print "Calico node is running with id: %s" % cid
    # Print a message to indicate libnetwork plugin is running when libnetwork is enabled
    if libnetwork_enabled:
        print "Calico node running with libnetwork plugin enabled"
    print "Waiting for successful startup"

    # Stream container output; when detach is True, only wait for startup.
    _attach_and_stream(container, detach)
def _start_node_container_rkt(ip, ip6, as_num, node_image, etcd_envs,
                              etcd_volumes, etcd_binds, backend):
    """
    Start the main Calico node container using rkt

    :param ip: The IPv4 address of the host.
    :param ip6: The IPv6 address of the host (or None if not configured)
    :param as_num: The AS number for the host.
    :param node_image: The calico-node image to use.
    :param etcd_envs: Etcd environment variables to pass into the container
    :param etcd_volumes: List of mount_paths for etcd files to mount on the
    container (currently unused - see the SSL TODO below)
    :param etcd_binds: Dictionary of host file and mount file pairs for etcd
    files to mount on the container (currently unused - see the SSL TODO below)
    :param backend: String, backend choice. Should be "bird", "none", or "gobgp".
    :return: None.
    """
    if node_image == "calico/node:latest":
        # The default image is being used so convert to the rkt format.
        node_image = "registry-1.docker.io/calico/node:latest"

    no_default_pools = os.getenv(NO_DEFAULT_POOLS_ENV)

    environment = [
        "CALICO_DISABLE_FILE_LOGGING=true",
        "HOSTNAME=%s" % hostname,
        "IP=%s" % (ip or ""),
        "IP6=%s" % (ip6 or ""),
        "CALICO_NETWORKING_BACKEND=%s" % backend,
        "AS=%s" % (as_num or ""),
        "NO_DEFAULT_POOLS=%s" % (no_default_pools or "")
    ] + etcd_envs

    # TODO No support for SSL (etcd binds) yet
    env_commands = []
    for env_var in environment:
        env_commands += ["--set-env=%s" % (env_var)]

    # Maybe in future we'll want to have a configurable path for the
    # stage1-fly.aci but for now use the following algorithm
    # 1) If there is a file in the current directory, use that.
    # 2) Otherwise use the file from the default location.
    #
    # This allows the image to be overridden (e.g. if using a custom version of
    # rkt on CoreOS where the default file can't be updated)
    stage1_filename = "stage1-fly.aci"
    if os.path.isfile(stage1_filename):
        stage1_path = stage1_filename
    else:
        stage1_path = "/usr/share/rkt/stage1-fly.aci"

    # Launch rkt through systemd-run so the node keeps running after this
    # process exits (unit name: calico-node).
    rkt_command = ["systemd-run", "--unit=calico-node", "rkt", "run",
                   "--stage1-path=%s" % stage1_path,
                   "--insecure-options=image",
                   "--volume=birdctl,kind=host,source=/var/run/calico,readOnly=false",
                   "--mount", "volume=birdctl,target=/var/run/calico",
                   "--volume=modules,kind=host,source=/lib/modules,readOnly=false",
                   "--mount", "volume=modules,target=/lib/modules"
                   ] + \
                  env_commands + \
                  [node_image]

    # Echo the full command for the user's reference before running it.
    print " ".join(rkt_command)
    call(rkt_command)
def _setup_ip_forwarding():
    """
    Ensure that IP forwarding is enabled.

    Exits the process with status 1 if either family's forwarding flag
    cannot be written.
    :return: None
    """
    # Enable IP forwarding since all compute hosts are vRouters.
    # IPv4 forwarding should be enabled already by docker.
    try:
        with open('/proc/sys/net/ipv4/ip_forward', 'w') as f:
            f.write("1")
    except Exception:
        print "ERROR: Could not enable ipv4 forwarding."
        sys.exit(1)

    try:
        # Only touch the IPv6 flag when the host has IPv6 enabled at all.
        if ipv6_enabled():
            with open('/proc/sys/net/ipv6/conf/all/forwarding', 'w') as f:
                f.write("1")
    except Exception:
        print "ERROR: Could not enable ipv6 forwarding."
        sys.exit(1)
def _set_nf_conntrack_max():
    """
    A common problem on Linux systems is running out of space in the conntrack
    table, which can cause poor iptables performance. This can happen if you
    run a lot of workloads on a given host, or if your workloads create a lot
    of TCP connections or bidirectional UDP streams.

    To avoid this becoming a problem, attempt to raise the conntrack table
    size to 1,000,000 entries.  Failure is non-fatal: a warning is printed
    and startup continues.
    :return: None
    """
    try:
        with open('/proc/sys/net/netfilter/nf_conntrack_max', 'w') as f:
            f.write("1000000")
    except Exception:
        # Best effort only - the write can fail, e.g. if the conntrack
        # module is not loaded or the value is locked down.
        print "WARNING: Could not set nf_contrack_max. This may have an impact at scale."
        print "See http://docs.projectcalico.org/en/latest/configuration.html#system-configuration for more details"
def node_stop(force):
    """
    Stop the Calico node.  This stops the containers (calico/node and
    calico/node-libnetwork) that are started by calicoctl node.

    :param force: True to stop the node even though it still has active
    endpoints; otherwise the command aborts with an explanation.
    """
    # The command has to be run as root to stop the calico-node service
    enforce_root()
    endpoints = len(client.get_endpoints(hostname=hostname))
    if endpoints:
        if not force:
            print_paragraph("Current host has active endpoints so can't be "
                            "stopped. Force with --force")
            print_paragraph("Note that stopping the node while there are "
                            "active endpoints may make it difficult to clean "
                            "up the endpoints: for example, Docker containers "
                            "networked using libnetwork with Calico will not "
                            "invoke network cleanup during the normal "
                            "container lifecycle.")
            sys.exit(1)
        else:
            print_paragraph("Stopping node while host has active endpoints. "
                            "If this in error, restart the node using the "
                            "'calicoctl node' command.")

    # Stop the Docker-managed containers; a 404 means the container does not
    # exist, which is fine.
    try:
        docker_client.stop("calico-node")
    except docker.errors.APIError as err:
        if err.response.status_code != 404:
            raise
    try:
        docker_client.stop("calico-libnetwork")
    except docker.errors.APIError as err:
        if err.response.status_code != 404:
            raise

    # Also stop the systemd unit used by the rkt runtime (see
    # _start_node_container_rkt, which launches via systemd-run).
    try:
        call(["systemctl", "stop", "calico-node.service"])
    except OSError:
        # systemctl not installed, ignore error.
        pass

    print "Node stopped"
def node_remove(remove_endpoints, host):
    """
    Remove a node from the Calico network.

    :param remove_endpoints: Whether the endpoint data should be forcibly
    removed.
    :param host: The hostname of the host whose node will be removed, or None if
    removing this host's node.
    :return: None.
    """
    host_to_remove = host or hostname

    # Refuse to remove the local node while its containers are running.
    if host_to_remove == hostname and (_container_running("calico-node") or
                                       _container_running("calico-libnetwork")):
        print_paragraph("The node cannot be removed while it is running. "
                        "Please run 'calicoctl node stop' to stop the node "
                        "before removing it.")
        sys.exit(1)

    endpoints = client.get_endpoints(hostname=host_to_remove)
    if endpoints and not remove_endpoints:
        print_paragraph("The node has active Calico endpoints so can't be "
                        "deleted. Force with --remove-endpoints")
        print_paragraph("Note that forcible removing the node may leave some "
                        "workloads in an indeterminate networked state. If "
                        "this is in error, you may restart the node using the "
                        "'calicoctl node' command and clean up the workloads "
                        "in the normal way.")
        sys.exit(1)

    # Remove the veths, and release all IPs associated with the endpoints. To
    # release the IPs, we construct a set of all IP addresses across all
    # endpoints (this assumes the endpoint nets are all single IPs).
    ips = set()
    for endpoint in endpoints:
        remove_veth(endpoint.name)
        ips |= {net.ip for net in endpoint.ipv4_nets}
        ips |= {net.ip for net in endpoint.ipv6_nets}
    client.release_ips(ips)

    # Remove the IPAM host data.
    client.remove_ipam_host(host_to_remove)

    # If the host had an IPIP tunnel address, release it back to the IPAM pool
    # so that we don't leak it when we delete the config.
    raw_addr = client.get_per_host_config(host_to_remove, "IpInIpTunnelAddr")
    try:
        ip_addr = IPAddress(raw_addr)
        client.release_ips({ip_addr})
    except (AddrFormatError, ValueError, TypeError):
        # No parseable tunnel address was configured - nothing to release.
        pass
    client.remove_per_host_config(host_to_remove, "IpInIpTunnelAddr")

    client.remove_host(host_to_remove)

    print "Node configuration removed"
def _container_running(container_name):
    """
    Check whether a container is currently running.

    :param container_name: The container name or ID.
    :return: True if running, otherwise False.
    """
    try:
        details = docker_client.inspect_container(container_name)
    except docker.errors.APIError as err:
        if err.response.status_code == 404:
            # Not found: the container does not exist, hence is not running.
            return False
        raise
    return details["State"]["Running"]
def node_show():
    """
    Show hostname and node information for each node in the Calico cluster.

    Exits with status 1 if the etcd datastore cannot be reached.
    """
    # Set up output table
    headings = ["Hostname",
                "Bird IPv4",
                "Bird IPv6",
                "AS Num",
                "BGP Peers v4",
                "BGP Peers v6"]
    x = PrettyTable(headings, sortby="Hostname")

    try:
        # Get dictionary of host data, indexed by hostname
        hosts = client.get_hosts_data_dict()
        for (host, data) in hosts.iteritems():
            # Combine BGP peer IP and AS numbers into single values
            peer_v4_list = [peer["ip"] + " as " + peer["as_num"]
                            for peer in data["peer_v4"]]
            peer_v6_list = [peer["ip"] + " as " + peer["as_num"]
                            for peer in data["peer_v6"]]

            # Use the host-specific AS number when set, otherwise fall back
            # to the global default and mark it as inherited.
            if data["as_num"]:
                bgp_as = data["as_num"]
            else:
                bgp_as = client.get_default_node_as()
                bgp_as += " (inherited)"

            x.add_row([host,
                       data["ip_addr_v4"],
                       data["ip_addr_v6"],
                       bgp_as,
                       "\n".join(peer_v4_list),
                       "\n".join(peer_v6_list)])
    except DataStoreError:
        print "Error connecting to etcd."
        sys.exit(1)

    print str(x) + "\n"
def node_bgppeer_add(ip, version, as_num):
    """
    Configure a new BGP peer, with the supplied address and AS Number, on
    this node.

    :param ip: The address to add
    :param version: 4 or 6
    :param as_num: The peer AS Number.
    :return: None
    """
    new_peer = BGPPeer(IPAddress(ip), as_num)
    client.add_bgp_peer(version, new_peer, hostname=hostname)
def node_bgppeer_remove(ip, version):
"""
Remove a global BGP peer from this node.
:param ip: The address to use.
:param version: 4 or 6
:return: None
"""
address = IPAddress(ip)
try:
client.remove_bgp_peer(version, address, hostname=hostname)
except KeyError:
print "%s is not a configured peer for this node." % address
sys.exit(1)
else:
print "BGP peer removed from node configuration"
def node_bgppeer_show(version):
"""
Print a list of the BGP Peers for this node.
"""
assert version in (4, 6)
peers = client.get_bgp_peers(version, hostname=hostname)
if peers:
heading = "Node specific IPv%s BGP Peer" % version
x = PrettyTable([heading, "AS Num"], sortby=heading)
for peer in peers:
x.add_row([peer.ip, peer.as_num])
x.align = "l"
print x.get_string(sortby=heading)
else:
print "No IPv%s BGP Peers defined for this node.\n" % version
def _find_or_pull_node_image(image_name):
    """
    Check if Docker has a cached copy of an image, and if not, attempt to pull
    it.  Exits with status 1 when the image cannot be downloaded.

    :param image_name: The full name of the image.
    :return: None.
    """
    try:
        _ = docker_client.inspect_image(image_name)
    except docker.errors.APIError as err:
        # NOTE(review): non-404 inspect errors are silently ignored here.
        if err.response.status_code == 404:
            # TODO: Display proper status bar
            print_paragraph("Pulling Docker image %s" % image_name)

            try:
                # Pull the image and then verify that it was succesfully
                # pulled (the pull doesn't raise an exception on failure).
                docker_client.pull(image_name)
                docker_client.inspect_image(image_name)
            except docker.errors.APIError:
                # Unable to download the Docker image.
                print_paragraph("ERROR: Unable to download Docker image.")
                print_paragraph("Please verify that you have network "
                                "connectivity to DockerHub and that, if you "
                                "explicitly specified which calico/node image "
                                "to use, the image name is correct.")
                sys.exit(1)
def _attach_and_stream(container, startup_only):
    """
    Attach to a container and stream its stdout and stderr output to this
    process's stdout. If the user presses Ctrl-C or the process is killed,
    also stop the Docker container.

    If startup_only is set, then only attach until the container starts up
    successfully, after which this function returns leaving the container
    running; otherwise the process exits when streaming ends.

    :param container: Docker container to attach to.
    :param startup_only: True to detach once startup is detected.
    :return: None.
    """
    # Register a SIGTERM handler, so we shut down the container if this
    # process is kill'd.
    def handle_sigterm(sig, frame):
        print "Got SIGTERM"
        docker_client.stop(container)
        sys.exit(0)
    signal.signal(signal.SIGTERM, handle_sigterm)

    # Default to stopping the container on exit with a generic error code;
    # only a detected successful startup clears the flag.
    stop_container_on_exit = True
    exit_code = 1

    output = docker_client.attach(container, stream=True)
    line_buf = ""
    try:
        for raw_data in output:
            sys.stdout.write(raw_data)
            if startup_only:
                # We've been asked to exit after the container has started,
                # look for the successful startup message. We buffer one line
                # of output in case we get a split line from the output stream.
                line_buf += raw_data
                if "Calico node started successfully" in line_buf:
                    stop_container_on_exit = False
                    break
                # Keep only the trailing partial line in the buffer.
                line_buf = line_buf.rsplit('\n')[-1]
    except KeyboardInterrupt:
        # Mainline. Someone pressed Ctrl-C.
        print "Stopping Calico node..."
        stop_container_on_exit = True
        exit_code = 130
    finally:
        # Could either be this process is being killed, or output generator
        # raises an exception.
        if stop_container_on_exit:
            docker_client.stop(container)
            # If the container is stopped, some sort of error occurred.
            sys.exit(exit_code)
|
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import signal
import sys
import docker
import docker.errors
import docker.utils
from netaddr import IPAddress, AddrFormatError
from prettytable import PrettyTable
from pycalico.datastore import (ETCD_AUTHORITY_ENV, ETCD_AUTHORITY_DEFAULT,
ETCD_KEY_FILE_ENV, ETCD_CERT_FILE_ENV,
ETCD_CA_CERT_FILE_ENV, ETCD_SCHEME_ENV,
ETCD_SCHEME_DEFAULT, ETCD_ENDPOINTS_ENV)
from pycalico.datastore_datatypes import BGPPeer
from pycalico.datastore_errors import DataStoreError
from pycalico.netns import remove_veth
from pycalico.util import validate_asn, validate_ip
from subprocess32 import call
from checksystem import check_system
from connectors import client, docker_client
from utils import (REQUIRED_MODULES, running_in_container, enforce_root,
get_container_ipv_from_arguments, hostname, print_paragraph,
convert_asn_to_asplain,
ipv6_enabled)
__doc__ = """
Usage:
calicoctl node [--ip=<IP>] [--ip6=<IP6>] [--node-image=<DOCKER_IMAGE_NAME>]
[--runtime=<RUNTIME>] [--as=<AS_NUM>] [--log-dir=<LOG_DIR>]
[--detach=<DETACH>] [--no-pull]
[(--libnetwork [--libnetwork-image=<LIBNETWORK_IMAGE_NAME>])]
[--backend=(bird | gobgp | none)]
calicoctl node stop [--force]
calicoctl node remove [--hostname=<HOSTNAME>] [--remove-endpoints]
calicoctl node show
calicoctl node bgp peer add <PEER_IP> as <AS_NUM>
calicoctl node bgp peer remove <PEER_IP>
calicoctl node bgp peer show [--ipv4 | --ipv6]
Description:
Configure the Calico node containers as well as default BGP information
for this node.
Options:
--as=<AS_NUM> The default AS number for this node.
--detach=<DETACH> Set "true" to run Calico service as detached,
"false" to run in the foreground. When using
libnetwork, this may not be set to "false".
When using --runtime=rkt, --detach is always false.
[default: true]
--force Forcefully stop the Calico node
--hostname=<HOSTNAME> The hostname from which to remove the Calico node.
--ip=<IP> The local management address to use.
--ip6=<IP6> The local IPv6 management address to use.
--ipv4 Show IPv4 information only.
--ipv6 Show IPv6 information only.
--libnetwork (Deprecated) Use the libnetwork plugin.
--libnetwork-image=<LIBNETWORK_IMAGE_NAME> (Deprecated) This flag will be ignored.
[default: calico/node-libnetwork:latest]
--log-dir=<LOG_DIR> The directory for logs [default: /var/log/calico]
--no-pull Prevent from pulling the Calico node Docker images.
--node-image=<DOCKER_IMAGE_NAME> Docker image to use for Calico's per-node
container. [default: calico/node:latest]
--remove-endpoints Remove the endpoint data when deleting the node
from the Calico network.
--runtime=<RUNTIME> Specify how Calico services should be
launched. When set to "docker" or "rkt", services
will be launched via the calico-node container,
whereas a value of "none" will not launch them at
all. [default: docker]
--backend=<BACKEND> Specify which networking backend to use.
Choices are "bird", "gobgp" or "none".
When set to "none", Calico node run in policy
only mode.
"""
# Deprecated environment variable controlling whether Calico (BIRD)
# networking runs; superseded by the --backend option (see
# get_networking_backend()).
CALICO_NETWORKING_ENV = "CALICO_NETWORKING"
CALICO_NETWORKING_DEFAULT = "true"
# When set, the node does not create the default IP pools on first start.
NO_DEFAULT_POOLS_ENV = "NO_DEFAULT_POOLS"
# Fixed in-container paths at which the host's etcd SSL files are mounted
# into the calico-node container.
ETCD_KEY_NODE_FILE = "/etc/calico/certs/key.pem"
ETCD_CERT_NODE_FILE = "/etc/calico/certs/cert.crt"
ETCD_CA_CERT_NODE_FILE = "/etc/calico/certs/ca_cert.crt"
def validate_arguments(arguments):
"""
Validate argument values:
<IP>
<IP6>
<PEER_IP>
<AS_NUM>
<DETACH>
Arguments not validated:
<DOCKER_IMAGE_NAME>
<LOG_DIR>
:param arguments: Docopt processed arguments
"""
# Validate IPs
ip_ok = arguments.get("--ip") is None or \
arguments.get("--ip") is "" or \
validate_ip(arguments.get("--ip"), 4)
ip6_ok = arguments.get("--ip6") is None or \
arguments.get("--ip6") is "" or \
validate_ip(arguments.get("--ip6"), 6)
container_ip_ok = arguments.get("<IP>") is None or \
validate_ip(arguments["<IP>"], 4) or \
validate_ip(arguments["<IP>"], 6)
peer_ip_ok = arguments.get("<PEER_IP>") is None or \
validate_ip(arguments["<PEER_IP>"], 4) or \
validate_ip(arguments["<PEER_IP>"], 6)
runtime_ok = arguments.get("--runtime") in [None, "none", "docker", "rkt"]
asnum_ok = True
asnum = arguments.get("<AS_NUM>") or arguments.get("--as")
if asnum:
asnum_ok = validate_asn(asnum)
detach_ok = True
if arguments.get("<DETACH>") or arguments.get("--detach"):
detach_ok = arguments.get("--detach") in ["true", "false"]
detach_libnetwork_ok = (arguments.get("--detach") == "true" or
not arguments.get("--libnetwork"))
# Print error message
if not ip_ok:
print "Invalid IPv4 address specified with --ip argument."
if not ip6_ok:
print "Invalid IPv6 address specified with --ip6 argument."
if not container_ip_ok or not peer_ip_ok:
print "Invalid IP address specified."
if not asnum_ok:
print "Invalid AS Number specified."
if not detach_ok:
print "Valid values for --detach are 'true' and 'false'"
if not detach_libnetwork_ok:
print "The only valid value for --detach is 'true' when using libnetwork"
if not runtime_ok:
print "Runtime must be 'docker', 'rkt' or 'none'."
# Exit if not valid argument
if not (ip_ok and ip6_ok and container_ip_ok and peer_ip_ok and asnum_ok
and detach_ok and detach_libnetwork_ok and runtime_ok):
sys.exit(1)
def get_networking_backend(docopt_backend):
    """
    Weighs both the deprecated CALICO_NETWORKING_ENV and the new --backend from docopt to determine
    which backend should be used. Ideally, we could use the docopt [default: bird], but until we've
    finished deprecating CALICO_NETWORKING_ENV, we need to be able to consider every combination of the
    two variables.

    :param docopt_backend: The docopt value for --backend. Should be "bird", "gobgp", "none", or None (if
    they are using CALICO_NETWORKING_ENV instead).
    :return: The backend to use: "bird", "gobgp" or "none".
    """
    # If backend was specified via docopt, use it, as command line args take
    # precedence over ENV vars.  Use 'is not None' (identity) for the None
    # check rather than '!= None' (equality) per PEP 8.
    if docopt_backend is not None:
        return docopt_backend
    else:
        # Otherwise, check if they are using the old binary flag: CALICO_NETWORK_ENV
        calico_networking = os.getenv(CALICO_NETWORKING_ENV)
        if not calico_networking:
            # Neither environment variable nor command line passed, use default: bird.
            return "bird"
        else:
            print >> sys.stderr, "WARNING: %s will be deprecated: use '--backend' instead" \
                                 % (CALICO_NETWORKING_ENV)
            if calico_networking == "false":
                # environment variable passed to disable Bird. use: none
                return "none"
            else:
                # environment variable passed as assumed default. use: bird.
                return "bird"
def node(arguments):
    """
    Main dispatcher for node commands. Calls the corresponding helper
    function based on the docopt-parsed command line.

    :param arguments: A dictionary of arguments already processed through
    this file's docstring with docopt
    :return: None
    """
    validate_arguments(arguments)
    backend = get_networking_backend(arguments.get('--backend'))
    as_num = convert_asn_to_asplain(arguments.get("<AS_NUM>") or
                                    arguments.get("--as"))
    if arguments.get("bgp"):
        if arguments.get("peer"):
            ip_version = get_container_ipv_from_arguments(arguments)
            if arguments.get("add"):
                node_bgppeer_add(arguments.get("<PEER_IP>"), ip_version,
                                 as_num)
            elif arguments.get("remove"):
                node_bgppeer_remove(arguments.get("<PEER_IP>"), ip_version)
            elif arguments.get("show"):
                # With no explicit IP version, show both v4 and v6 peers.
                versions = [ip_version] if ip_version else [4, 6]
                for version in versions:
                    node_bgppeer_show(version)
    elif arguments.get("stop"):
        node_stop(arguments.get("--force"))
    elif arguments.get("remove"):
        node_remove(arguments.get("--remove-endpoints"),
                    arguments.get("--hostname"))
    elif arguments.get("show"):
        node_show()
    else:
        # Default command: start the node.  --detach has a docopt default,
        # so it must be one of the two validated values by this point.
        assert arguments.get("--detach") in ["true", "false"]
        node_start(ip=arguments.get("--ip"),
                   node_image=arguments.get('--node-image'),
                   runtime=arguments.get("--runtime"),
                   log_dir=arguments.get("--log-dir"),
                   ip6=arguments.get("--ip6"),
                   as_num=as_num,
                   detach=arguments.get("--detach") == "true",
                   libnetwork_enabled=bool(arguments.get("--libnetwork")),
                   no_pull=arguments.get("--no-pull"),
                   backend=backend)
def node_start(node_image, runtime, log_dir, ip, ip6, as_num, detach,
               libnetwork_enabled, no_pull, backend):
    """
    Create the calico-node container and establish Calico networking on this
    host.

    :param node_image: The calico-node image to use.
    :param runtime: "docker", "rkt" or "none" - how the calico-node services
    should be launched.  With "none" no container is started at all.
    :param log_dir: Host directory to mount into the container for logs.
    :param ip: The IPv4 address of the host.
    :param ip6: The IPv6 address of the host (or None if not configured)
    :param as_num: The BGP AS Number to use for this node. If not specified
    the global default value will be used.
    :param detach: True to run in Docker's "detached" mode, False to run
    attached.
    :param libnetwork_enabled: True to run libnetwork plugin inside calico-node.
    :param no_pull: Boolean, True to prevent function from pulling the Calico
    node Docker images.
    :param backend: String, backend choice. Should be "bird", "none", or "gobgp".
    :return: None.
    """
    # The command has to be run as root to access iptables and services
    enforce_root()
    # Normally, Felix will load the modules it needs, but when running inside a
    # container it might not be able to do so. Ensure the required modules are
    # loaded each time the node starts.
    # We only make a best effort attempt because the command may fail if the
    # modules are built in.
    # We'll warn during the check_system() if the modules are unavailable.
    if not running_in_container():
        try:
            call(["modprobe", "-a"] + REQUIRED_MODULES)
        except OSError:
            # modprobe binary not available; check_system() will warn.
            pass
    _setup_ip_forwarding()
    _set_nf_conntrack_max()
    # Print warnings for any known system issues before continuing.
    # Docker checks only apply when this process will actually drive the
    # Docker API (i.e. docker runtime, not already inside a container).
    if runtime == 'docker' and not running_in_container():
        using_docker = True
    else:
        using_docker = False
    (_, docker_ok, etcd_ok) = \
        check_system(quit_if_error=False, libnetwork=libnetwork_enabled,
                     check_docker=using_docker,
                     check_modules=not running_in_container())
    if not etcd_ok or (using_docker and not docker_ok):
        sys.exit(1)
    # Ensure log directory and /var/run/calico exist so that they can be
    # mounted into the containers.
    try:
        os.makedirs(log_dir)
    except OSError:
        # Directory already exists (or cannot be created); best effort.
        pass
    try:
        os.makedirs("/var/run/calico")
    except OSError:
        pass
    # The format of the authority and endpoints strings have already been
    # validated.
    etcd_authority = os.getenv(ETCD_AUTHORITY_ENV, ETCD_AUTHORITY_DEFAULT)
    etcd_endpoints = os.getenv(ETCD_ENDPOINTS_ENV)
    # Get etcd SSL environment variables if they exist
    etcd_scheme = os.getenv(ETCD_SCHEME_ENV, ETCD_SCHEME_DEFAULT)
    etcd_key_file = os.getenv(ETCD_KEY_FILE_ENV)
    etcd_cert_file = os.getenv(ETCD_CERT_FILE_ENV)
    etcd_ca_cert_file = os.getenv(ETCD_CA_CERT_FILE_ENV)
    # Build the etcd environment/volume/bind configuration to hand through to
    # the node container.  Host SSL files are remapped to fixed in-container
    # paths (ETCD_*_NODE_FILE constants).
    etcd_volumes = []
    etcd_binds = {}
    etcd_envs = ["ETCD_AUTHORITY=%s" % etcd_authority,
                 "ETCD_SCHEME=%s" % etcd_scheme]
    if etcd_endpoints:
        etcd_envs.append("ETCD_ENDPOINTS=%s" % etcd_endpoints)
    if etcd_ca_cert_file:
        etcd_volumes.append(ETCD_CA_CERT_NODE_FILE)
        etcd_binds[etcd_ca_cert_file] = {"bind": ETCD_CA_CERT_NODE_FILE,
                                         "ro": True}
        etcd_envs.append("ETCD_CA_CERT_FILE=%s" % ETCD_CA_CERT_NODE_FILE)
    # Key and cert are only useful as a pair.
    if etcd_key_file and etcd_cert_file:
        etcd_volumes.append(ETCD_KEY_NODE_FILE)
        etcd_binds[etcd_key_file] = {"bind": ETCD_KEY_NODE_FILE,
                                     "ro": True}
        etcd_envs.append("ETCD_KEY_FILE=%s" % ETCD_KEY_NODE_FILE)
        etcd_volumes.append(ETCD_CERT_NODE_FILE)
        etcd_binds[etcd_cert_file] = {"bind": ETCD_CERT_NODE_FILE,
                                      "ro": True}
        etcd_envs.append("ETCD_CERT_FILE=%s" % ETCD_CERT_NODE_FILE)
    if runtime == 'docker':
        _start_node_container_docker(ip, ip6, as_num, log_dir, node_image, detach,
                                     etcd_envs, etcd_volumes, etcd_binds, libnetwork_enabled, no_pull, backend)
    if runtime == 'rkt':
        _start_node_container_rkt(ip, ip6, as_num, node_image, etcd_envs,
                                  etcd_volumes, etcd_binds, backend)
def _start_node_container_docker(ip, ip6, as_num, log_dir, node_image, detach, etcd_envs,
                                 etcd_volumes, etcd_binds, libnetwork_enabled, no_pull, backend):
    """
    Start the main Calico node container.

    :param ip: The IPv4 address of the host.
    :param ip6: The IPv6 address of the host (or None if not configured)
    :param as_num: The AS number for the host
    :param log_dir: The log directory to use.
    :param node_image: The calico-node Docker image to run.
    :param detach: True to run in Docker's "detached" mode, False to run
    attached.
    :param etcd_envs: Etcd environment variables to pass into the container
    :param etcd_volumes: List of mount_paths for etcd files to mount on the
    container
    :param etcd_binds: Dictionary of host file and mount file pairs for etcd
    files to mount on the container
    :param libnetwork_enabled: True to run libnetwork plugin inside calico-node.
    :param no_pull: Boolean, True to prevent function from pulling the Calico
    node Docker image.
    :param backend: String, networking backend ("bird", "gobgp" or "none").
    :return: None.
    """
    no_default_pools = os.getenv(NO_DEFAULT_POOLS_ENV)
    if not no_pull:
        # Make sure the required image is pulled before removing the old one.
        # This minimizes downtime during upgrade.
        _find_or_pull_node_image(node_image)
    # Remove any pre-existing calico-node container.  A 404 just means there
    # was no old container, which is fine.
    try:
        docker_client.remove_container("calico-node", force=True)
    except docker.errors.APIError as err:
        if err.response.status_code != 404:
            raise
    # This is to convert libnetwork_enabled (bool) into a string to pass it as an ENV var value
    if libnetwork_enabled:
        libnetwork_flag_str = "true"
    else:
        libnetwork_flag_str = "false"
    environment = [
        "HOSTNAME=%s" % hostname,
        "IP=%s" % (ip or ""),
        "IP6=%s" % (ip6 or ""),
        "CALICO_NETWORKING_BACKEND=%s" % backend,
        "AS=%s" % (as_num or ""),
        "NO_DEFAULT_POOLS=%s" % (no_default_pools or ""),
        "CALICO_LIBNETWORK_ENABLED=%s" % libnetwork_flag_str
    ] + etcd_envs
    # Read-write host binds for logs, the BIRD control socket directory and
    # kernel modules.
    binds = {
        log_dir:
            {
                "bind": "/var/log/calico",
                "ro": False
            },
        "/var/run/calico":
            {
                "bind": "/var/run/calico",
                "ro": False
            },
        "/lib/modules":
            {
                "bind": "/lib/modules",
                "ro": False
            }
    }
    # Additional rw bind (/run/docker/plugins) necessory when libnetwork is enabled
    if libnetwork_enabled:
        binds["/run/docker/plugins"] = {
            "bind": "/run/docker/plugins",
            "ro": False
        }
    binds.update(etcd_binds)
    host_config = docker_client.create_host_config(
        privileged=True,
        restart_policy={"Name": "always"},
        network_mode="host",
        binds=binds)
    volumes = ["/var/log/calico", "/var/run/calico", "/lib/modules"] + etcd_volumes
    # Add /run/docker/plugins to the list of volumes to be mounted when libnetwork is enabled
    if libnetwork_enabled:
        volumes.append("/run/docker/plugins")
    container = docker_client.create_container(
        node_image,
        name="calico-node",
        detach=True,
        environment=environment,
        host_config=host_config,
        volumes=volumes)
    cid = container["Id"]
    # Echo the equivalent "docker run" command line so the user can see (and
    # reproduce) exactly how the node container was launched.
    env_string = ""
    for an_env in environment:
        env_string += " -e " + an_env
    vol_string = ""
    for a_vol in binds:
        vol_string += " -v %s:%s" % (a_vol, binds[a_vol]["bind"])
    detach_string = " -d" if detach else ""
    print "Running Docker container with the following command:\n"
    print "docker run%s --restart=always --net=host --privileged --name=calico-node%s%s %s\n" % \
          (detach_string, env_string, vol_string, node_image)
    docker_client.start(container)
    print "Calico node is running with id: %s" % cid
    # Print a message to indicate libnetwork plugin is running when libnetwork is enabled
    if libnetwork_enabled:
        print "Calico node running with libnetwork plugin enabled"
    print "Waiting for successful startup"
    # Stream the container's output.  When detach was requested, we only stay
    # attached until startup is reported successful.
    _attach_and_stream(container, detach)
def _start_node_container_rkt(ip, ip6, as_num, node_image, etcd_envs,
                              etcd_volumes, etcd_binds, backend):
    """
    Start the main Calico node container using rkt

    :param ip: The IPv4 address of the host.
    :param ip6: The IPv6 address of the host (or None if not configured)
    :param as_num: The AS number for the host.
    :param node_image: The calico-node image to use.
    :param etcd_envs: Etcd environment variables to pass into the container
    :param etcd_volumes: List of mount_paths for etcd files to mount on the
    container (currently unused - see SSL TODO below).
    :param etcd_binds: Dictionary of host file and mount file pairs for etcd
    files to mount on the container (currently unused - see SSL TODO below).
    :param backend: String, networking backend ("bird", "gobgp" or "none").
    :return: None.
    """
    if node_image == "calico/node:latest":
        # The default image is being used so convert to the rkt format.
        node_image = "registry-1.docker.io/calico/node:latest"
    no_default_pools = os.getenv(NO_DEFAULT_POOLS_ENV)
    environment = [
        "CALICO_DISABLE_FILE_LOGGING=true",
        "HOSTNAME=%s" % hostname,
        "IP=%s" % (ip or ""),
        "IP6=%s" % (ip6 or ""),
        "CALICO_NETWORKING_BACKEND=%s" % backend,
        "AS=%s" % (as_num or ""),
        "NO_DEFAULT_POOLS=%s" % (no_default_pools or "")
    ] + etcd_envs
    # TODO No support for SSL (etcd binds) yet
    env_commands = []
    for env_var in environment:
        env_commands += ["--set-env=%s" % (env_var)]
    # Maybe in future we'll want to have a configurable path for the
    # stage1-fly.aci but for now use the following algorithm
    # 1) If there is a file in the current directory, use that.
    # 2) Otherwise use the file from the default location.
    #
    # This allows the image to be overridden (e.g. if using a custom version of
    # rkt on CoreOS where the default file can't be updated)
    stage1_filename = "stage1-fly.aci"
    if os.path.isfile(stage1_filename):
        stage1_path = stage1_filename
    else:
        stage1_path = "/usr/share/rkt/stage1-fly.aci"
    # Launch via systemd-run so the node runs as a transient systemd unit
    # named "calico-node" (stopped later via systemctl in node_stop()).
    rkt_command = ["systemd-run", "--unit=calico-node", "rkt", "run",
                   "--stage1-path=%s" % stage1_path,
                   "--insecure-options=image",
                   "--volume=birdctl,kind=host,source=/var/run/calico,readOnly=false",
                   "--mount", "volume=birdctl,target=/var/run/calico",
                   "--volume=modules,kind=host,source=/lib/modules,readOnly=false",
                   "--mount", "volume=modules,target=/lib/modules"
                   ] + \
                  env_commands + \
                  [node_image]
    print " ".join(rkt_command)
    call(rkt_command)
def _setup_ip_forwarding():
"""
Ensure that IP forwarding is enabled.
:return: None
"""
# Enable IP forwarding since all compute hosts are vRouters.
# IPv4 forwarding should be enabled already by docker.
try:
with open('/proc/sys/net/ipv4/ip_forward', 'w') as f:
f.write("1")
except Exception:
print "ERROR: Could not enable ipv4 forwarding."
sys.exit(1)
try:
if ipv6_enabled():
with open('/proc/sys/net/ipv6/conf/all/forwarding', 'w') as f:
f.write("1")
except Exception:
print "ERROR: Could not enable ipv6 forwarding."
sys.exit(1)
def _set_nf_conntrack_max():
"""
A common problem on Linux systems is running out of space in the conntrack
table, which can cause poor iptables performance. This can happen if you
run a lot of workloads on a given host, or if your workloads create a lot
of TCP connections or bidirectional UDP streams.
To avoid this becoming a problem, we recommend increasing the conntrack
table size. To do so, run the following commands:
"""
try:
with open('/proc/sys/net/netfilter/nf_conntrack_max', 'w') as f:
f.write("1000000")
except Exception:
print "WARNING: Could not set nf_contrack_max. This may have an impact at scale."
print "See http://docs.projectcalico.org/en/latest/configuration.html#system-configuration for more details"
def node_stop(force):
    """
    Stop the Calico node.  This stops the containers (calico/node and
    calico/node-libnetwork) that are started by calicoctl node.

    :param force: True to stop the node even though this host still has
    active endpoints (prints a warning); False to refuse and exit.
    :return: None.
    """
    # The command has to be run as root to stop the calico-node service
    enforce_root()
    endpoints = len(client.get_endpoints(hostname=hostname))
    if endpoints:
        if not force:
            print_paragraph("Current host has active endpoints so can't be "
                            "stopped. Force with --force")
            print_paragraph("Note that stopping the node while there are "
                            "active endpoints may make it difficult to clean "
                            "up the endpoints: for example, Docker containers "
                            "networked using libnetwork with Calico will not "
                            "invoke network cleanup during the normal "
                            "container lifecycle.")
            sys.exit(1)
        else:
            print_paragraph("Stopping node while host has active endpoints. "
                            "If this in error, restart the node using the "
                            "'calicoctl node' command.")
    # Stop the node containers.  A 404 means the container doesn't exist,
    # which is not an error here.
    try:
        docker_client.stop("calico-node")
    except docker.errors.APIError as err:
        if err.response.status_code != 404:
            raise
    try:
        docker_client.stop("calico-libnetwork")
    except docker.errors.APIError as err:
        if err.response.status_code != 404:
            raise
    # Also stop the systemd unit used by the rkt runtime (see
    # _start_node_container_rkt, which launches via systemd-run).
    try:
        call(["systemctl", "stop", "calico-node.service"])
    except OSError:
        # systemctl not installed, ignore error.
        pass
    print "Node stopped"
def node_remove(remove_endpoints, host):
    """
    Remove a node from the Calico network.

    :param remove_endpoints: Whether the endpoint data should be forcibly
    removed.
    :param host: The hostname of the host whose node will be removed, or None if
    removing this host's node.
    :return: None.
    """
    host_to_remove = host or hostname
    # Refuse to remove the local node while its containers are still running.
    if host_to_remove == hostname and (_container_running("calico-node") or
                                       _container_running("calico-libnetwork")):
        print_paragraph("The node cannot be removed while it is running. "
                        "Please run 'calicoctl node stop' to stop the node "
                        "before removing it.")
        sys.exit(1)
    endpoints = client.get_endpoints(hostname=host_to_remove)
    if endpoints and not remove_endpoints:
        print_paragraph("The node has active Calico endpoints so can't be "
                        "deleted. Force with --remove-endpoints")
        print_paragraph("Note that forcible removing the node may leave some "
                        "workloads in an indeterminate networked state. If "
                        "this is in error, you may restart the node using the "
                        "'calicoctl node' command and clean up the workloads "
                        "in the normal way.")
        sys.exit(1)
    # Remove the veths, and release all IPs associated with the endpoints. To
    # release the IPs, we construct a set of all IP addresses across all
    # endpoints (this assumes the endpoint nets are all single IPs).
    ips = set()
    for endpoint in endpoints:
        remove_veth(endpoint.name)
        ips |= {net.ip for net in endpoint.ipv4_nets}
        ips |= {net.ip for net in endpoint.ipv6_nets}
    client.release_ips(ips)
    # Remove the IPAM host data.
    client.remove_ipam_host(host_to_remove)
    # If the host had an IPIP tunnel address, release it back to the IPAM pool
    # so that we don't leak it when we delete the config.
    raw_addr = client.get_per_host_config(host_to_remove, "IpInIpTunnelAddr")
    try:
        ip_addr = IPAddress(raw_addr)
        client.release_ips({ip_addr})
    except (AddrFormatError, ValueError, TypeError):
        # No (or unparseable) tunnel address configured; nothing to release.
        pass
    client.remove_per_host_config(host_to_remove, "IpInIpTunnelAddr")
    client.remove_host(host_to_remove)
    print "Node configuration removed"
def _container_running(container_name):
    """
    Check if a container is currently running or not.

    :param container_name: The container name or ID.
    :return: True if running, otherwise False.
    """
    try:
        inspect_data = docker_client.inspect_container(container_name)
    except docker.errors.APIError as err:
        # A 404 means no such container exists, so it isn't running; any
        # other API error is unexpected and propagated.
        if err.response.status_code == 404:
            return False
        raise
    return inspect_data["State"]["Running"]
def node_show():
    """
    Show hostname and node information for each node in the Calico cluster.

    Prints a table with one row per host: BGP IPv4/IPv6 addresses, the AS
    number in use (marked "(inherited)" when taken from the global default),
    and the per-node BGP peers for each IP version.
    """
    # Set up output table
    headings = ["Hostname",
                "Bird IPv4",
                "Bird IPv6",
                "AS Num",
                "BGP Peers v4",
                "BGP Peers v6"]
    x = PrettyTable(headings, sortby="Hostname")
    try:
        # Get dictionary of host data, indexed by hostname
        hosts = client.get_hosts_data_dict()
        for (host, data) in hosts.iteritems():
            # Combine BGP peer IP and AS numbers into single values
            peer_v4_list = [peer["ip"] + " as " + peer["as_num"]
                            for peer in data["peer_v4"]]
            peer_v6_list = [peer["ip"] + " as " + peer["as_num"]
                            for peer in data["peer_v6"]]
            # Prefer the node-specific AS number; otherwise fall back to the
            # global default and flag it as inherited.
            if data["as_num"]:
                bgp_as = data["as_num"]
            else:
                bgp_as = client.get_default_node_as()
                bgp_as += " (inherited)"
            x.add_row([host,
                       data["ip_addr_v4"],
                       data["ip_addr_v6"],
                       bgp_as,
                       "\n".join(peer_v4_list),
                       "\n".join(peer_v6_list)])
    except DataStoreError:
        print "Error connecting to etcd."
        sys.exit(1)
    print str(x) + "\n"
def node_bgppeer_add(ip, version, as_num):
    """
    Add a new BGP peer with the supplied IP address and AS Number to this
    node.

    :param ip: The address to add
    :param version: 4 or 6
    :param as_num: The peer AS Number.
    :return: None
    """
    # Build the peer object and store it against this host in the datastore.
    new_peer = BGPPeer(IPAddress(ip), as_num)
    client.add_bgp_peer(version, new_peer, hostname=hostname)
def node_bgppeer_remove(ip, version):
"""
Remove a global BGP peer from this node.
:param ip: The address to use.
:param version: 4 or 6
:return: None
"""
address = IPAddress(ip)
try:
client.remove_bgp_peer(version, address, hostname=hostname)
except KeyError:
print "%s is not a configured peer for this node." % address
sys.exit(1)
else:
print "BGP peer removed from node configuration"
def node_bgppeer_show(version):
"""
Print a list of the BGP Peers for this node.
"""
assert version in (4, 6)
peers = client.get_bgp_peers(version, hostname=hostname)
if peers:
heading = "Node specific IPv%s BGP Peer" % version
x = PrettyTable([heading, "AS Num"], sortby=heading)
for peer in peers:
x.add_row([peer.ip, peer.as_num])
x.align = "l"
print x.get_string(sortby=heading)
else:
print "No IPv%s BGP Peers defined for this node.\n" % version
def _find_or_pull_node_image(image_name):
    """
    Check if Docker has a cached copy of an image, and if not, attempt to pull
    it.

    :param image_name: The full name of the image.
    :return: None.  Exits with status 1 if the image is neither cached nor
    pullable.
    """
    try:
        _ = docker_client.inspect_image(image_name)
    except docker.errors.APIError as err:
        # A 404 means the image is not cached locally; try to pull it.
        if err.response.status_code == 404:
            # TODO: Display proper status bar
            print_paragraph("Pulling Docker image %s" % image_name)
            try:
                # Pull the image and then verify that it was successfully
                # pulled (the pull doesn't raise an exception on failure).
                docker_client.pull(image_name)
                docker_client.inspect_image(image_name)
            except docker.errors.APIError:
                # Unable to download the Docker image.
                print_paragraph("ERROR: Unable to download Docker image.")
                print_paragraph("Please verify that you have network "
                                "connectivity to DockerHub and that, if you "
                                "explicitly specified which calico/node image "
                                "to use, the image name is correct.")
                sys.exit(1)
def _attach_and_stream(container, startup_only):
    """
    Attach to a container and stream its stdout and stderr output to this
    process's stdout. If the user presses Ctrl-C or the process is killed,
    also stop the Docker container.

    If startup_only is set, then only attach until the container starts up
    successfully.

    :param container: Docker container to attach to.
    :param startup_only: True to detach (leaving the container running) once
    the successful-startup message appears in the output.
    :return: None.  This function always terminates the process via
    sys.exit(): 0 on SIGTERM, 130 on Ctrl-C, 1 otherwise.
    """
    # Register a SIGTERM handler, so we shut down the container if this
    # process is kill'd.
    def handle_sigterm(sig, frame):
        print "Got SIGTERM"
        docker_client.stop(container)
        sys.exit(0)
    signal.signal(signal.SIGTERM, handle_sigterm)
    # Default to stopping the container: only a clean "started successfully"
    # detach leaves it running.
    stop_container_on_exit = True
    exit_code = 1
    output = docker_client.attach(container, stream=True)
    line_buf = ""
    try:
        for raw_data in output:
            sys.stdout.write(raw_data)
            if startup_only:
                # We've been asked to exit after the container has started,
                # look for the successful startup message. We buffer one line
                # of output in case we get a split line from the output stream.
                line_buf += raw_data
                if "Calico node started successfully" in line_buf:
                    stop_container_on_exit = False
                    break
                # Keep only the trailing partial line in the buffer.
                line_buf = line_buf.rsplit('\n')[-1]
    except KeyboardInterrupt:
        # Mainline. Someone pressed Ctrl-C.
        print "Stopping Calico node..."
        stop_container_on_exit = True
        exit_code = 130
    finally:
        # Could either be this process is being killed, or output generator
        # raises an exception.
        if stop_container_on_exit:
            docker_client.stop(container)
        # If the container is stopped, some sort of error occurred.
        sys.exit(exit_code)
|
en
| 0.780662
|
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Usage: calicoctl node [--ip=<IP>] [--ip6=<IP6>] [--node-image=<DOCKER_IMAGE_NAME>] [--runtime=<RUNTIME>] [--as=<AS_NUM>] [--log-dir=<LOG_DIR>] [--detach=<DETACH>] [--no-pull] [(--libnetwork [--libnetwork-image=<LIBNETWORK_IMAGE_NAME>])] [--backend=(bird | gobgp | none)] calicoctl node stop [--force] calicoctl node remove [--hostname=<HOSTNAME>] [--remove-endpoints] calicoctl node show calicoctl node bgp peer add <PEER_IP> as <AS_NUM> calicoctl node bgp peer remove <PEER_IP> calicoctl node bgp peer show [--ipv4 | --ipv6] Description: Configure the Calico node containers as well as default BGP information for this node. Options: --as=<AS_NUM> The default AS number for this node. --detach=<DETACH> Set "true" to run Calico service as detached, "false" to run in the foreground. When using libnetwork, this may not be set to "false". When using --runtime=rkt, --detach is always false. [default: true] --force Forcefully stop the Calico node --hostname=<HOSTNAME> The hostname from which to remove the Calico node. --ip=<IP> The local management address to use. --ip6=<IP6> The local IPv6 management address to use. --ipv4 Show IPv4 information only. --ipv6 Show IPv6 information only. --libnetwork (Deprecated) Use the libnetwork plugin. --libnetwork-image=<LIBNETWORK_IMAGE_NAME> (Deprecated) This flag will be ignored. 
[default: calico/node-libnetwork:latest] --log-dir=<LOG_DIR> The directory for logs [default: /var/log/calico] --no-pull Prevent from pulling the Calico node Docker images. --node-image=<DOCKER_IMAGE_NAME> Docker image to use for Calico's per-node container. [default: calico/node:latest] --remove-endpoints Remove the endpoint data when deleting the node from the Calico network. --runtime=<RUNTIME> Specify how Calico services should be launched. When set to "docker" or "rkt", services will be launched via the calico-node container, whereas a value of "none" will not launch them at all. [default: docker] --backend=<BACKEND> Specify which networking backend to use. Choices are "bird", "gobgp" or "none". When set to "none", Calico node run in policy only mode. Validate argument values: <IP> <IP6> <PEER_IP> <AS_NUM> <DETACH> Arguments not validated: <DOCKER_IMAGE_NAME> <LOG_DIR> :param arguments: Docopt processed arguments # Validate IPs # Print error message # Exit if not valid argument Weighs both the deprecated CALICO_NETWORKING_ENV and the new --backend from docopt to determine which backend should be used. Ideally, we could use the docopt [default: bird], but until we've finished deprecating CALICO_NETWORKING_ENV, we need to be able to consider every combination of the two variables. :param docopt_backend: The docopt value for --backend. Should be "bird", "gobgp", "none", or None (if they are using CALICO_NETWORKING_ENV instead). :return: # If backend was specified via docopt, use it, as command line args take precedence over ENV vars. # Otherwise, check if they are using the old binary flag: CALICO_NETWORK_ENV # Neither environment variable nor command line passed, use default: bird. # environment variable passed to disable Bird. use: none # environment variable passed as assumed default. use: bird. Main dispatcher for node commands. Calls the corresponding helper function. 
:param arguments: A dictionary of arguments already processed through this file's docstring with docopt :return: None # Set libnetwork_enabled to False if --libnetwork flag is not passed Create the calico-node container and establish Calico networking on this host. :param ip: The IPv4 address of the host. :param node_image: The calico-node image to use. :param ip6: The IPv6 address of the host (or None if not configured) :param as_num: The BGP AS Number to use for this node. If not specified the global default value will be used. :param detach: True to run in Docker's "detached" mode, False to run attached. :param libnetwork_enabled: True to run libnetwork plugin inside calico-node. :param no_pull: Boolean, True to prevent function from pulling the Calico node Docker images. :param backend: String, backend choice. Should be "bird", "none", or "gobgp". :return: None. # The command has to be run as root to access iptables and services # Normally, Felix will load the modules it needs, but when running inside a # container it might not be able to do so. Ensure the required modules are # loaded each time the node starts. # We only make a best effort attempt because the command may fail if the # modules are built in. # We'll warn during the check_system() if the modules are unavailable. # Print warnings for any known system issues before continuing # Ensure log directory and /var/run/calico exist so that they can be # mounted into the containers. # The format of the authority and endpoints strings have already been # validated. # Get etcd SSL environment variables if they exist Start the main Calico node container. :param ip: The IPv4 address of the host. :param ip6: The IPv6 address of the host (or None if not configured) :param as_num: The AS number for the host :param log_dir: The log directory to use. :param libnetwork_enabled: True to run libnetwork plugin inside calico-node. :param detach: True to run in Docker's "detached" mode, False to run attached. 
:param etcd_envs: Etcd environment variables to pass into the container :param etcd_volumes: List of mount_paths for etcd files to mount on the container :param etcd_binds: Dictionary of host file and mount file pairs for etcd files to mount on the container :param no_pull: Boolean, True to prevent function from pulling the Calico node Docker image. :return: None. # Make sure the required image is pulled before removing the old one. # This minimizes downtime during upgrade. # This is to convert libnetwork_enabled (bool) into a string to pass it as an ENV var value # Additional rw bind (/run/docker/plugins) necessory when libnetwork is enabled # Add /run/docker/plugins to the list of volumes to be mounted when libnetwork is enabled # Print a message to indicate libnetwork plugin is running when libnetwork is enabled Start the main Calico node container using rkt :param ip: The IPv4 address of the host. :param ip6: The IPv6 address of the host (or None if not configured) :param as_num: The AS number for the host. :param node_image: The calico-node image to use. :param etcd_envs: Etcd environment variables to pass into the container :param etcd_volumes: List of mount_paths for etcd files to mount on the container :param etcd_binds: Dictionary of host file and mount file pairs for etcd files to mount on the container :param backend: :return: None. # The default image is being used so convert to the rkt format. # TODO No support for SSL (etcd binds) yet # Maybe in future we'll want to have a configurable path for the # stage1-fly.aci but for now use the following algorithm # 1) If there is a file in the current directory, use that. # 2) Otherwise use the file from the default location. # # This allows the image to be overridden (e.g. if using a custom version of # rkt on CoreOS where the default file can't be updated) Ensure that IP forwarding is enabled. :return: None # Enable IP forwarding since all compute hosts are vRouters. 
# IPv4 forwarding should be enabled already by docker. A common problem on Linux systems is running out of space in the conntrack table, which can cause poor iptables performance. This can happen if you run a lot of workloads on a given host, or if your workloads create a lot of TCP connections or bidirectional UDP streams. To avoid this becoming a problem, we recommend increasing the conntrack table size. To do so, run the following commands: #system-configuration for more details" Stop the Calico node. This stops the containers (calico/node and calico/node-libnetwork) that are started by calicoctl node. # The command has to be run as root to stop the calico-node service # systemctl not installed, ignore error. Remove a node from the Calico network. :param remove_endpoints: Whether the endpoint data should be forcibly removed. :param host: The hostname of the host whose node will be removed, or None if removing this host's node. :return: None. # Remove the veths, and release all IPs associated with the endpoints. To # release the IPs, we construct a set of all IP addresses across all # endpoints (this assumes the endpoint nets are all single IPs). # Remove the IPAM host data. # If the host had an IPIP tunnel address, release it back to the IPAM pool # so that we don't leak it when we delete the config. Check if a container is currently running or not. :param container_name: The container name or ID. :return: True if running, otherwise False. Show hostname and node information for each node in the Calico cluster. # Set up output table # Get dictionary of host data, indexed by hostname # Combine BGP peer IP and AS numbers into single values Add a new BGP peer with the supplied IP address and AS Number to this node. :param ip: The address to add :param version: 4 or 6 :param as_num: The peer AS Number. :return: None Remove a global BGP peer from this node. :param ip: The address to use. :param version: 4 or 6 :return: None Print a list of the BGP Peers for this node. 
Check if Docker has a cached copy of an image, and if not, attempt to pull it. :param image_name: The full name of the image. :return: None. # TODO: Display proper status bar # Pull the image and then verify that it was succesfully # pulled (the pull doesn't raise an exception on failure). # Unable to download the Docker image. Attach to a container and stream its stdout and stderr output to this process's stdout. If the user presses Ctrl-C or the process is killed, also stop the Docker container. If startup_only is set, then only attach until the container starts up successfully. :param container: Docker container to attach to. :return: None. # Register a SIGTERM handler, so we shut down the container if this # process is kill'd. # We've been asked to exit after the container has started, # look for the successful startup message. We buffer one line # of output in case we get a split line from the output stream. # Mainline. Someone pressed Ctrl-C. # Could either be this process is being killed, or output generator # raises an exception. # If the container is stopped, some sort of error occurred.
| 1.500494
| 2
|
nuitka/nodes/NodeMakingHelpers.py
|
accnops/Nuitka
| 0
|
6625590
|
<gh_stars>0
# Copyright 2018, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" These are just helpers to create nodes, often to replace existing nodes
These are for use in optimizations and computations, and therefore cover
mostly exceptions and constants.
Often cyclic dependencies kick in, which is why this module is mostly only
imported locally. Note: It's intended to be reversed, this module will make
the local imports instead, as these local imports look ugly everywhere else,
making it more difficult to use.
"""
from logging import warning
from nuitka.Builtins import builtin_names
from nuitka.Constants import isConstant
from nuitka.Options import isDebug, shallWarnImplicitRaises
from nuitka.PythonVersions import python_version
def makeConstantReplacementNode(constant, node):
    """Create a constant reference node, borrowing the source reference of *node*."""
    from .ConstantRefNodes import makeConstantRefNode

    source_ref = node.getSourceReference()

    return makeConstantRefNode(constant = constant, source_ref = source_ref)
def makeRaiseExceptionReplacementExpression(expression, exception_type,
                                            exception_value):
    """Build an expression node that unconditionally raises a builtin exception.

    Used to replace *expression* once optimization has proven it must raise.
    Emits a warning when implicit-raise warnings are enabled.
    """
    from .ExceptionNodes import ExpressionRaiseException
    from .BuiltinRefNodes import ExpressionBuiltinExceptionRef

    source_ref = expression.getSourceReference()

    assert type(exception_type) is str

    if shallWarnImplicitRaises():
        warning(
            '%s: Will always raise exception: "%s(%s)"',
            source_ref.getAsString(),
            exception_type,
            exception_value
        )

    # The exception type is referenced as a builtin name; the value becomes a
    # constant node derived from the replaced expression's location.
    exception_type_node = ExpressionBuiltinExceptionRef(
        exception_name = exception_type,
        source_ref     = source_ref
    )
    exception_value_node = makeConstantReplacementNode(
        constant = exception_value,
        node     = expression
    )

    return ExpressionRaiseException(
        exception_type  = exception_type_node,
        exception_value = exception_value_node,
        source_ref      = source_ref
    )
def makeRaiseExceptionReplacementStatement(statement, exception_type,
                                           exception_value):
    """Build a statement node that unconditionally raises a builtin exception.

    Statement counterpart of makeRaiseExceptionReplacementExpression; used to
    replace *statement* once optimization has proven it must raise.
    """
    from .ExceptionNodes import StatementRaiseExceptionImplicit
    from .BuiltinRefNodes import ExpressionBuiltinExceptionRef

    source_ref = statement.getSourceReference()

    assert type(exception_type) is str

    if shallWarnImplicitRaises():
        warning(
            '%s: Will always raise exception: "%s(%s)"',
            source_ref.getAsString(),
            exception_type,
            exception_value
        )

    exception_type_node = ExpressionBuiltinExceptionRef(
        exception_name = exception_type,
        source_ref     = source_ref
    )
    exception_value_node = makeConstantReplacementNode(
        constant = exception_value,
        node     = statement
    )

    # Implicit raises carry neither a cause nor an explicit traceback.
    return StatementRaiseExceptionImplicit(
        exception_type  = exception_type_node,
        exception_value = exception_value_node,
        exception_cause = None,
        exception_trace = None,
        source_ref      = source_ref
    )
def makeRaiseExceptionReplacementExpressionFromInstance(expression, exception):
    """Derive a static raise replacement from a live exception instance.

    The exception's class name and args become the raised type and value.
    """
    assert isinstance(exception, Exception)

    args = exception.args
    assert type(args) is tuple

    # A single-argument exception carries that argument directly, any other
    # arity keeps the full args tuple as the value.
    value = args[0] if len(args) == 1 else args

    return makeRaiseExceptionReplacementExpression(
        expression      = expression,
        exception_type  = exception.__class__.__name__,
        exception_value = value
    )
def makeRaiseExceptionExpressionFromTemplate(exception_type, template,
                                             template_args, source_ref):
    """Build a raise expression whose message is formatted at run time.

    The raised value is the binary "Mod" operation ``template % template_args``,
    evaluated when the generated code executes.
    """
    from .ExceptionNodes import ExpressionRaiseException
    from .BuiltinRefNodes import ExpressionBuiltinExceptionRef
    from .OperatorNodes import makeBinaryOperationNode
    from .ConstantRefNodes import makeConstantRefNode
    from .ContainerMakingNodes import ExpressionMakeTuple

    # A plain tuple of argument nodes is wrapped into a tuple-making node.
    if type(template_args) is tuple:
        template_args = ExpressionMakeTuple(
            elements   = template_args,
            source_ref = source_ref
        )

    message_node = makeBinaryOperationNode(
        operator = "Mod",
        left     = makeConstantRefNode(
            constant      = template,
            source_ref    = source_ref,
            user_provided = True
        ),
        right      = template_args,
        source_ref = source_ref
    )

    return ExpressionRaiseException(
        exception_type  = ExpressionBuiltinExceptionRef(
            exception_name = exception_type,
            source_ref     = source_ref
        ),
        exception_value = message_node,
        source_ref      = source_ref
    )
def makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue(template,
                                                               operation,
                                                               original_node,
                                                               value_node):
    """Replace *original_node* with a TypeError raise built from *template*.

    :param template: Message template, optionally containing one ``%s`` for
        the offending value's type name.
    :param operation: Name of the operation, used in the change description.
    :param original_node: The node being replaced by the static raise.
    :param value_node: The value whose type shape determines the message.
    :return: Tuple of (replacement node, change tag, change description).
    """
    shape = value_node.getTypeShape()

    type_name = shape.getTypeName()

    if type_name is not None:
        # Concrete type known at compile time: the message can be formatted
        # right now, and the value's side effects must be preserved.
        result = makeRaiseExceptionReplacementExpressionFromInstance(
            expression = original_node,
            exception  = TypeError(template % type_name if '%' in template else template)
        )

        result = wrapExpressionWithNodeSideEffects(
            new_node = result,
            old_node = value_node
        )
    else:
        from .AttributeNodes import ExpressionAttributeLookup
        from .TypeNodes import ExpressionBuiltinType1

        source_ref = original_node.getSourceReference()

        # Type only known at run time: defer formatting by emitting
        # "template % type(value).__name__" into the generated code.
        # Bug fix: the caller's template is used here; previously a
        # hard-coded len() message was emitted for every operation.
        result = makeRaiseExceptionExpressionFromTemplate(
            exception_type = "TypeError",
            template       = template,
            template_args  = ExpressionAttributeLookup(
                source         = ExpressionBuiltinType1(
                    value      = value_node.makeClone(),
                    source_ref = source_ref
                ),
                attribute_name = "__name__",
                source_ref     = source_ref
            ),
            source_ref     = source_ref
        )

        type_name = shape.__name__

    return result, "new_raise", "Raising for use of '%s' on %s '%s'." % (
        operation,
        "type" if type_name is not None else "shape",
        type_name
    )
def makeCompileTimeConstantReplacementNode(value, node):
    """Turn a compile-time value into a reference node, if representable.

    Falls back to returning *node* unchanged for values that cannot be
    expressed as constants or builtin references.
    """
    # This needs to match code in isCompileTimeConstantValue
    if isConstant(value):
        return makeConstantReplacementNode(
            constant = value,
            node     = node
        )

    if type(value) is type and value.__name__ in builtin_names:
        # Builtin types are referenced by name rather than as constants.
        from .BuiltinRefNodes import makeExpressionBuiltinRef

        return makeExpressionBuiltinRef(
            builtin_name = value.__name__,
            source_ref   = node.getSourceReference()
        )

    return node
def getComputationResult(node, computation, description):
    """ With a computation function, execute it and return constant result or
        exception node.
    """
    # Try and turn raised exceptions into static raises. pylint: disable=broad-except
    try:
        result = computation()
    except Exception as e:
        return (
            makeRaiseExceptionReplacementExpressionFromInstance(
                expression = node,
                exception  = e
            ),
            "new_raise",
            description + " Predicted to raise an exception."
        )

    new_node = makeCompileTimeConstantReplacementNode(
        value = result,
        node  = node
    )

    if isDebug():
        # In debug mode, a computation that changes nothing is a mistake.
        assert new_node is not node, (node, result)

    if new_node is not node:
        return (
            new_node,
            "new_constant",
            description + " Predicted constant result."
        )

    return new_node, None, None
def makeStatementExpressionOnlyReplacementNode(expression, node):
    """Wrap *expression* as an expression-only statement at *node*'s location."""
    from .StatementNodes import StatementExpressionOnly

    source_ref = node.getSourceReference()

    return StatementExpressionOnly(
        expression = expression,
        source_ref = source_ref
    )
def mergeStatements(statements, allow_none = False):
    """ Helper function that merges nested statement sequences. """
    merged = []

    for statement in statements:
        if statement is None and allow_none:
            # Skipped entries are tolerated only when explicitly allowed.
            continue

        if type(statement) in (tuple, list):
            # Nested containers are flattened recursively.
            merged.extend(mergeStatements(statement, allow_none))
        elif statement.isStatement() or statement.isStatementsFrame():
            merged.append(statement)
        elif statement.isStatementsSequence():
            merged.extend(mergeStatements(statement.getStatements()))
        else:
            assert False, statement

    return merged
def makeStatementsSequenceReplacementNode(statements, node):
    """Create a statements sequence from *statements* at *node*'s location.

    Nested sequences and containers are flattened via mergeStatements.
    """
    from .StatementNodes import StatementsSequence

    source_ref = node.getSourceReference()

    return StatementsSequence(
        statements = mergeStatements(statements),
        source_ref = source_ref
    )
def convertNoneConstantToNone(node):
    """Map a constant-None reference node (or None itself) to plain None.

    Any other node is passed through unchanged.
    """
    if node is None:
        return None

    if node.isExpressionConstantRef() and node.getConstant() is None:
        return None

    return node
def wrapExpressionWithSideEffects(side_effects, old_node, new_node):
    """Attach side effect expressions to *new_node*, preserving their order.

    Effects that cannot actually have side effects are dropped; when none
    remain, *new_node* is returned unchanged.
    """
    assert new_node.isExpression()

    from .SideEffectNodes import ExpressionSideEffects

    if side_effects:
        # Flatten each contributor's own extracted side effects, filtering
        # out the ones that are provably effect-free.
        collected = []

        for side_effect in side_effects:
            if side_effect.mayHaveSideEffects():
                collected.extend(side_effect.extractSideEffects())

        if collected:
            new_node = ExpressionSideEffects(
                expression   = new_node,
                side_effects = tuple(collected),
                source_ref   = old_node.getSourceReference()
            )

    return new_node
def wrapExpressionWithNodeSideEffects(new_node, old_node):
    """Attach all of *old_node*'s extracted side effects to *new_node*."""
    effects = old_node.extractSideEffects()

    return wrapExpressionWithSideEffects(
        side_effects = effects,
        old_node     = old_node,
        new_node     = new_node
    )
def wrapStatementWithSideEffects(new_node, old_node, allow_none = False):
    """Prepend *old_node*'s side effects, as statements, before *new_node*.

    With no side effects, *new_node* is returned as given (possibly None if
    *allow_none* is set).
    """
    assert new_node is not None or allow_none

    side_effects = old_node.extractSideEffects()

    if not side_effects:
        return new_node

    from .StatementNodes import StatementExpressionOnly

    # Each side effect expression becomes its own expression-only statement.
    statements = tuple(
        StatementExpressionOnly(
            expression = side_effect,
            source_ref = side_effect.getSourceReference()
        )
        for side_effect in side_effects
    )

    if new_node is not None:
        statements += (new_node,)

    return makeStatementsSequenceReplacementNode(
        statements = statements,
        node       = old_node
    )
def makeStatementOnlyNodesFromExpressions(expressions):
    """Turn expressions into statements, as one node, a sequence, or None.

    Returns None for no expressions, a single statement for one, and a
    statements sequence otherwise.
    """
    from .StatementNodes import StatementExpressionOnly, StatementsSequence

    statements = [
        StatementExpressionOnly(
            expression = expression,
            source_ref = expression.getSourceReference()
        )
        for expression in expressions
    ]

    if not statements:
        return None

    if len(statements) == 1:
        return statements[0]

    return StatementsSequence(
        statements = tuple(statements),
        source_ref = statements[0].getSourceReference()
    )
def makeVariableRefNode(variable, source_ref):
    """Create the proper reference node kind for *variable*.

    Temporary variables get a dedicated reference node class.
    """
    if variable.isTempVariable():
        from .VariableRefNodes import ExpressionTempVariableRef as ref_class
    else:
        from .VariableRefNodes import ExpressionVariableRef as ref_class

    return ref_class(
        variable   = variable,
        source_ref = source_ref
    )
def makeExpressionBuiltinLocals(provider, source_ref):
    """Create the node implementing a ``locals()`` use for *provider*.

    The node kind depends on what owns the variables: module level maps to
    globals, class bodies reference their scope directly, and functions get
    an updated or copied view depending on Python version and optimization.
    """
    # Outline functions are transparent, their parent owns the variables.
    while provider.isExpressionOutlineFunction():
        provider = provider.getParentVariableProvider()

    if provider.isCompiledPythonModule():
        # At module level, locals() is simply globals().
        from .GlobalsLocalsNodes import ExpressionBuiltinGlobals

        return ExpressionBuiltinGlobals(source_ref = source_ref)

    from .GlobalsLocalsNodes import (
        ExpressionBuiltinLocalsCopy,
        ExpressionBuiltinLocalsRef,
        ExpressionBuiltinLocalsUpdated
    )

    locals_scope = provider.getFunctionLocalsScope()

    if provider.isExpressionClassBody():
        return ExpressionBuiltinLocalsRef(
            locals_scope = locals_scope,
            source_ref   = source_ref
        )

    if python_version >= 300 or provider.isUnoptimized():
        assert locals_scope, provider

        return ExpressionBuiltinLocalsUpdated(
            locals_scope = locals_scope,
            source_ref   = source_ref
        )

    # TODO: Make this not true, there ought to be always a locals
    # scope.
    assert not locals_scope, provider

    return ExpressionBuiltinLocalsCopy(
        locals_scope = locals_scope,
        source_ref   = source_ref
    )
|
# Copyright 2018, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" These are just helpers to create nodes, often to replace existing nodes
These are for use in optimizations and computations, and therefore cover
mostly exceptions and constants.
Often cyclic dependencies kick in, which is why this module is mostly only
imported locally. Note: It's intended to be reversed, this module will make
the local imports instead, as these local imports look ugly everywhere else,
making it more difficult to use.
"""
from logging import warning
from nuitka.Builtins import builtin_names
from nuitka.Constants import isConstant
from nuitka.Options import isDebug, shallWarnImplicitRaises
from nuitka.PythonVersions import python_version
def makeConstantReplacementNode(constant, node):
from .ConstantRefNodes import makeConstantRefNode
return makeConstantRefNode(
constant = constant,
source_ref = node.getSourceReference()
)
def makeRaiseExceptionReplacementExpression(expression, exception_type,
exception_value):
from .ExceptionNodes import ExpressionRaiseException
from .BuiltinRefNodes import ExpressionBuiltinExceptionRef
source_ref = expression.getSourceReference()
assert type(exception_type) is str
if shallWarnImplicitRaises():
warning(
'%s: Will always raise exception: "%s(%s)"',
source_ref.getAsString(),
exception_type,
exception_value
)
result = ExpressionRaiseException(
exception_type = ExpressionBuiltinExceptionRef(
exception_name = exception_type,
source_ref = source_ref
),
exception_value = makeConstantReplacementNode(
constant = exception_value,
node = expression
),
source_ref = source_ref
)
return result
def makeRaiseExceptionReplacementStatement(statement, exception_type,
exception_value):
from .ExceptionNodes import StatementRaiseExceptionImplicit
from .BuiltinRefNodes import ExpressionBuiltinExceptionRef
source_ref = statement.getSourceReference()
assert type(exception_type) is str
if shallWarnImplicitRaises():
warning(
'%s: Will always raise exception: "%s(%s)"',
source_ref.getAsString(),
exception_type,
exception_value
)
result = StatementRaiseExceptionImplicit(
exception_type = ExpressionBuiltinExceptionRef(
exception_name = exception_type,
source_ref = source_ref
),
exception_value = makeConstantReplacementNode(
constant = exception_value,
node = statement
),
exception_cause = None,
exception_trace = None,
source_ref = source_ref
)
return result
def makeRaiseExceptionReplacementExpressionFromInstance(expression, exception):
assert isinstance(exception, Exception)
args = exception.args
if type(args) is tuple and len(args) == 1:
value = args[0]
else:
assert type(args) is tuple
value = args
return makeRaiseExceptionReplacementExpression(
expression = expression,
exception_type = exception.__class__.__name__,
exception_value = value
)
def makeRaiseExceptionExpressionFromTemplate(exception_type, template,
template_args, source_ref):
from .ExceptionNodes import ExpressionRaiseException
from .BuiltinRefNodes import ExpressionBuiltinExceptionRef
from .OperatorNodes import makeBinaryOperationNode
from .ConstantRefNodes import makeConstantRefNode
from .ContainerMakingNodes import ExpressionMakeTuple
if type(template_args) is tuple:
template_args = ExpressionMakeTuple(
elements = template_args,
source_ref = source_ref
)
return ExpressionRaiseException(
exception_type = ExpressionBuiltinExceptionRef(
exception_name = exception_type,
source_ref = source_ref
),
exception_value = makeBinaryOperationNode(
operator = "Mod",
left = makeConstantRefNode(
constant = template,
source_ref = source_ref,
user_provided = True
),
right = template_args,
source_ref = source_ref
),
source_ref = source_ref
)
def makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue(template,
operation,
original_node,
value_node):
shape = value_node.getTypeShape()
type_name = shape.getTypeName()
if type_name is not None:
result = makeRaiseExceptionReplacementExpressionFromInstance(
expression = original_node,
exception = TypeError(template % type_name if '%' in template else template)
)
result = wrapExpressionWithNodeSideEffects(
new_node = result,
old_node = value_node
)
else:
from .AttributeNodes import ExpressionAttributeLookup
from .TypeNodes import ExpressionBuiltinType1
source_ref = original_node.getSourceReference()
result = makeRaiseExceptionExpressionFromTemplate(
exception_type = "TypeError",
template = "object of type '%s' has no len()",
template_args = ExpressionAttributeLookup(
source = ExpressionBuiltinType1(
value = value_node.makeClone(),
source_ref = source_ref
),
attribute_name = "__name__",
source_ref = source_ref
),
source_ref = source_ref
)
type_name = shape.__name__
return result, "new_raise", "Raising for use of '%s' on %s '%s'." % (
operation,
"type" if type_name is not None else "shape",
type_name
)
def makeCompileTimeConstantReplacementNode(value, node):
# This needs to match code in isCompileTimeConstantValue
if isConstant(value):
return makeConstantReplacementNode(
constant = value,
node = node
)
elif type(value) is type:
if value.__name__ in builtin_names:
from .BuiltinRefNodes import makeExpressionBuiltinRef
return makeExpressionBuiltinRef(
builtin_name = value.__name__,
source_ref = node.getSourceReference()
)
else:
return node
else:
return node
def getComputationResult(node, computation, description):
""" With a computation function, execute it and return constant result or
exception node.
"""
# Try and turn raised exceptions into static raises. pylint: disable=broad-except
try:
result = computation()
except Exception as e:
new_node = makeRaiseExceptionReplacementExpressionFromInstance(
expression = node,
exception = e
)
change_tags = "new_raise"
change_desc = description + " Predicted to raise an exception."
else:
new_node = makeCompileTimeConstantReplacementNode(
value = result,
node = node
)
if isDebug():
assert new_node is not node, (node, result)
if new_node is not node:
change_tags = "new_constant"
change_desc = description + " Predicted constant result."
else:
change_tags = None
change_desc = None
return new_node, change_tags, change_desc
def makeStatementExpressionOnlyReplacementNode(expression, node):
from .StatementNodes import StatementExpressionOnly
return StatementExpressionOnly(
expression = expression,
source_ref = node.getSourceReference()
)
def mergeStatements(statements, allow_none = False):
""" Helper function that merges nested statement sequences. """
merged_statements = []
for statement in statements:
if statement is None and allow_none:
pass
elif type(statement) in (tuple, list):
merged_statements += mergeStatements(statement, allow_none)
elif statement.isStatement() or statement.isStatementsFrame():
merged_statements.append(statement)
elif statement.isStatementsSequence():
merged_statements.extend(mergeStatements(statement.getStatements()))
else:
assert False, statement
return merged_statements
def makeStatementsSequenceReplacementNode(statements, node):
from .StatementNodes import StatementsSequence
return StatementsSequence(
statements = mergeStatements(statements),
source_ref = node.getSourceReference()
)
def convertNoneConstantToNone(node):
if node is None:
return None
elif node.isExpressionConstantRef() and node.getConstant() is None:
return None
else:
return node
def wrapExpressionWithSideEffects(side_effects, old_node, new_node):
assert new_node.isExpression()
from .SideEffectNodes import ExpressionSideEffects
if side_effects:
side_effects = sum(
(
side_effect.extractSideEffects()
for side_effect in
side_effects
if side_effect.mayHaveSideEffects()
),
()
)
if side_effects:
new_node = ExpressionSideEffects(
expression = new_node,
side_effects = side_effects,
source_ref = old_node.getSourceReference()
)
return new_node
def wrapExpressionWithNodeSideEffects(new_node, old_node):
return wrapExpressionWithSideEffects(
side_effects = old_node.extractSideEffects(),
old_node = old_node,
new_node = new_node
)
def wrapStatementWithSideEffects(new_node, old_node, allow_none = False):
assert new_node is not None or allow_none
side_effects = old_node.extractSideEffects()
if side_effects:
from .StatementNodes import StatementExpressionOnly
side_effects = tuple(
StatementExpressionOnly(
expression = side_effect,
source_ref = side_effect.getSourceReference()
)
for side_effect in side_effects
)
if new_node is not None:
new_node = makeStatementsSequenceReplacementNode(
statements = side_effects + (new_node,),
node = old_node
)
else:
new_node = makeStatementsSequenceReplacementNode(
statements = side_effects,
node = old_node
)
return new_node
def makeStatementOnlyNodesFromExpressions(expressions):
from .StatementNodes import StatementExpressionOnly, StatementsSequence
statements = tuple(
StatementExpressionOnly(
expression = expression,
source_ref = expression.getSourceReference()
)
for expression in expressions
)
if not statements:
return None
elif len(statements) == 1:
return statements[0]
else:
return StatementsSequence(
statements = statements,
source_ref = statements[0].getSourceReference()
)
def makeVariableRefNode(variable, source_ref):
if variable.isTempVariable():
from .VariableRefNodes import ExpressionTempVariableRef
return ExpressionTempVariableRef(
variable = variable,
source_ref = source_ref
)
else:
from .VariableRefNodes import ExpressionVariableRef
return ExpressionVariableRef(
variable = variable,
source_ref = source_ref
)
def makeExpressionBuiltinLocals(provider, source_ref):
while provider.isExpressionOutlineFunction():
provider = provider.getParentVariableProvider()
if provider.isCompiledPythonModule():
from .GlobalsLocalsNodes import ExpressionBuiltinGlobals
return ExpressionBuiltinGlobals(
source_ref = source_ref
)
else:
from .GlobalsLocalsNodes import (
ExpressionBuiltinLocalsCopy,
ExpressionBuiltinLocalsRef,
ExpressionBuiltinLocalsUpdated
)
if provider.isExpressionClassBody():
return ExpressionBuiltinLocalsRef(
locals_scope = provider.getFunctionLocalsScope(),
source_ref = source_ref
)
elif python_version >= 300 or provider.isUnoptimized():
assert provider.getFunctionLocalsScope(), provider
return ExpressionBuiltinLocalsUpdated(
locals_scope = provider.getFunctionLocalsScope(),
source_ref = source_ref
)
else:
# TODO: Make this not true, there ought to be always a locals
# scope.
assert not provider.getFunctionLocalsScope(), provider
return ExpressionBuiltinLocalsCopy(
locals_scope = provider.getFunctionLocalsScope(),
source_ref = source_ref
)
|
en
| 0.874062
|
# Copyright 2018, <NAME>, mailto:<EMAIL> # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # These are just helpers to create nodes, often to replace existing nodes These are for use in optimizations and computations, and therefore cover mostly exceptions and constants. Often cyclic dependencies kicks in, which is why this module is mostly only imported locally. Note: It's intended to be reversed, this module will make the local imports instead, as these local imports look ugly everywhere else, making it more difficult to use. # This needs to match code in isCompileTimeConstantValue With a computation function, execute it and return constant result or exception node. # Try and turn raised exceptions into static raises. pylint: disable=broad-except Helper function that merges nested statement sequences. # TODO: Make this not true, there ought to be always a locals # scope.
| 1.549071
| 2
|
code/tasks/HANNA/flags.py
|
weituo12321/PREVALENT_HANNA
| 0
|
6625591
|
import argparse
def _str2bool(value):
    """Parse a command-line boolean such as "true"/"false", "1"/"0", "yes"/"no".

    argparse's ``type=bool`` is a well-known trap: ``bool("False")`` is True,
    so any non-empty string would enable the flag. This converter interprets
    the text instead, raising the standard argparse error on junk input.
    """
    if isinstance(value, bool):
        return value
    lowered = str(value).strip().lower()
    if lowered in ('true', 't', 'yes', 'y', '1'):
        return True
    if lowered in ('false', 'f', 'no', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('expected a boolean value, got %r' % value)


def make_parser():
    """Build the argument parser for HANNA training/evaluation.

    :return: a configured ``argparse.ArgumentParser``.

    Bug fix versus the previous version: all flags formerly declared with
    ``type=bool`` now use a real boolean converter, so e.g.
    ``--d_bidirectional False`` is parsed as False rather than True.
    """
    parser = argparse.ArgumentParser()
    # Meta
    parser.add_argument('-config_file', type=str,
                        help='configuration file')
    parser.add_argument('-data_dir', type=str, default='hanna',
                        help='data directory')
    parser.add_argument('-data_prefix', type=str, default='hanna',
                        help='data file prefix')
    parser.add_argument('-load_path', type=str,
                        help='path to load a pretrained model')
    parser.add_argument('-exp_name', type=str,
                        help='name of the experiment')
    parser.add_argument('-seed', type=int,
                        help='random seed')
    parser.add_argument('-img_features', type=str,
                        help='path to pretrained image embeddings')
    parser.add_argument('-max_instr_len', type=int,
                        help='maximum number of words in a language instruction')
    parser.add_argument('-device_id', type=int,
                        help='gpu id')
    parser.add_argument('-start_point_radius', type=int,
                        help='Start point radius')
    # Model
    parser.add_argument('-img_feature_size', type=int, default=2048,
                        help='image embedding size')
    parser.add_argument('-word_embed_size', type=int,
                        help='word embedding size')
    parser.add_argument('-action_embed_size', type=int,
                        help='navigation action embedding size')
    parser.add_argument('-ask_embed_size', type=int,
                        help='ask action embedding size')
    parser.add_argument('-loc_embed_size', type=int,
                        help='camera angles embedding size')
    parser.add_argument('-hidden_size', type=int,
                        help='number of hidden units')
    parser.add_argument('-attention_heads', type=int,
                        help='Number of attention heads')
    parser.add_argument('-num_layers', type=int,
                        help='Number of transformer layers')
    parser.add_argument('-dropout_ratio', type=float,
                        help='dropout probability')
    # Training
    parser.add_argument('-alpha', type=float,
                        help='Curiosity-encouraging loss weight')
    parser.add_argument('-lr', type=float,
                        help='learning rate')
    parser.add_argument('-weight_decay', type=float,
                        help='L2-regularization weight')
    parser.add_argument('-n_iters', type=int,
                        help='number of training iterations (batches)')
    parser.add_argument('-batch_size', type=int,
                        help='batch size (both training and evaluation)')
    parser.add_argument('-train_episode_len', type=int,
                        help='maximum number of time steps during training')
    parser.add_argument('-start_lr_decay', type=int,
                        help='iteration to start decaying learning rate')
    parser.add_argument('-lr_decay_rate', type=float,
                        help='learning rate decay rate')
    parser.add_argument('-decay_lr_every', type=int,
                        help='number of iterations between learning rate decays')
    parser.add_argument('-log_every', type=int,
                        help='number of iterations between information loggings')
    # Evaluation
    parser.add_argument('-eval_episode_len', type=int,
                        help='maximum number of time steps during evaluation')
    parser.add_argument('-success_radius', type=float,
                        help='success radius')
    parser.add_argument('-eval_only', type=int,
                        help='evaluation mode')
    parser.add_argument('-eval_on_val', type=int, default=0)
    # Non-learning baselines
    parser.add_argument('-random_agent', type=int, default=0,
                        help='Agent that randomly selects navigation actions')
    parser.add_argument('-forward_agent', type=int, default=0,
                        help='Agent that always selects action 1')
    parser.add_argument('-shortest_agent', type=int, default=0,
                        help='Optimal shortest-path agent')
    # Perfect language interpretation baseline
    parser.add_argument('-perfect_interpretation', type=int, default=0,
                        help='provide perfect assistance interpretation')
    # Ask baseline
    parser.add_argument('-ask_baseline', type=str, default=None,
                        help='Help-request teacher baseline')
    # Ablation baseline
    parser.add_argument('-no_sim_attend', type=int, default=0,
                        help="No cosine similarity attention (beta = 0)")
    parser.add_argument('-no_reason', type=int, default=0,
                        help="No condition (reason) prediction")
    parser.add_argument('-no_reset_inter', type=int, default=0,
                        help="No reset inter-task module")
    # Single modality baseline
    parser.add_argument('-instruction_baseline', type=str, default=None,
                        help='Instruction type baseline')
    # for dicencoder
    parser.add_argument('--encoder_type', dest='encoder_type', default='dic', type=str, help='encoder type')
    parser.add_argument('--d_hidden_size', dest='d_hidden_size', default=256, type=int, help='decoder hidden_size')
    parser.add_argument('--d_ctx_size', dest='d_ctx_size', default=768, type=int, help='ctx hidden_size')
    parser.add_argument('--d_enc_hidden_size', dest='d_enc_hidden_size', default=128, type=int, help='encoder hidden_size')
    parser.add_argument('--d_dropout_ratio', dest='d_dropout_ratio', default=0.4, type=float, help='dropout_ratio')
    parser.add_argument('--d_bidirectional', dest='d_bidirectional', type=_str2bool, default=True, help='bidirectional')
    parser.add_argument('--d_transformer_update', dest='d_transformer_update', type=_str2bool, default=False, help='update Bert')
    parser.add_argument('--d_update_add_layer', dest='d_update_add_layer', type=_str2bool, default=False, help='update fusion layer in Bert')
    parser.add_argument('--d_bert_n_layers', dest='d_bert_n_layers', type=int, default=1, help='bert_n_layers')
    parser.add_argument('--d_reverse_input', dest='d_reverse_input', type=_str2bool, default=True, help='reverse')
    parser.add_argument('--d_top_lstm', dest='d_top_lstm', type=_str2bool, default=True, help='add lstm to the top of transformers')
    parser.add_argument('--d_vl_layers', dest='d_vl_layers', type=int, default=4, help='vl_layers')
    parser.add_argument('--d_la_layers', dest='d_la_layers', type=int, default=9, help='la_layers')
    parser.add_argument('--d_bert_type', dest='d_bert_type', type=str, default="small", help='small or large')
    parser.add_argument('--pretrain_model_name', dest='pretrain_model_name', type=str, default=None, help='the name of pretrained model')
    return parser
|
import argparse


def _str2bool(value):
    """Parse a command-line boolean flag value.

    argparse's ``type=bool`` is a classic pitfall: ``bool("False")`` is True
    because every non-empty string is truthy, so boolean options could never
    be switched off from the command line.  This converter accepts the usual
    spellings of true/false and rejects anything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('boolean value expected, got %r' % value)


def make_parser():
    """Build and return the argparse parser for training/evaluation options.

    Returns:
        argparse.ArgumentParser with meta, model, training, evaluation,
        baseline and dic-encoder option groups registered.
    """
    parser = argparse.ArgumentParser()
    # Meta
    parser.add_argument('-config_file', type=str,
                        help='configuration file')
    parser.add_argument('-data_dir', type=str, default='hanna',
                        help='data directory')
    parser.add_argument('-data_prefix', type=str, default='hanna',
                        help='data file prefix')
    parser.add_argument('-load_path', type=str,
                        help='path to load a pretrained model')
    parser.add_argument('-exp_name', type=str,
                        help='name of the experiment')
    parser.add_argument('-seed', type=int,
                        help='random seed')
    parser.add_argument('-img_features', type=str,
                        help='path to pretrained image embeddings')
    parser.add_argument('-max_instr_len', type=int,
                        help='maximum number of words in a language instruction')
    parser.add_argument('-device_id', type=int,
                        help='gpu id')
    parser.add_argument('-start_point_radius', type=int,
                        help='Start point radius')
    # Model
    parser.add_argument('-img_feature_size', type=int, default=2048,
                        help='image embedding size')
    parser.add_argument('-word_embed_size', type=int,
                        help='word embedding size')
    parser.add_argument('-action_embed_size', type=int,
                        help='navigation action embedding size')
    parser.add_argument('-ask_embed_size', type=int,
                        help='ask action embedding size')
    parser.add_argument('-loc_embed_size', type=int,
                        help='camera angles embedding size')
    parser.add_argument('-hidden_size', type=int,
                        help='number of hidden units')
    parser.add_argument('-attention_heads', type=int,
                        help='Number of attention heads')
    parser.add_argument('-num_layers', type=int,
                        help='Number of transformer layers')
    parser.add_argument('-dropout_ratio', type=float,
                        help='dropout probability')
    # Training
    parser.add_argument('-alpha', type=float,
                        help='Curiosity-encouraging loss weight')
    parser.add_argument('-lr', type=float,
                        help='learning rate')
    parser.add_argument('-weight_decay', type=float,
                        help='L2-regularization weight')
    parser.add_argument('-n_iters', type=int,
                        help='number of training iterations (batches)')
    parser.add_argument('-batch_size', type=int,
                        help='batch size (both training and evaluation)')
    parser.add_argument('-train_episode_len', type=int,
                        help='maximum number of time steps during training')
    parser.add_argument('-start_lr_decay', type=int,
                        help='iteration to start decaying learning rate')
    parser.add_argument('-lr_decay_rate', type=float,
                        help='learning rate decay rate')
    parser.add_argument('-decay_lr_every', type=int,
                        help='number of iterations between learning rate decays')
    parser.add_argument('-log_every', type=int,
                        help='number of iterations between information loggings')
    # Evaluation
    parser.add_argument('-eval_episode_len', type=int,
                        help='maximum number of time steps during evaluation')
    parser.add_argument('-success_radius', type=float,
                        help='success radius')
    parser.add_argument('-eval_only', type=int,
                        help='evaluation mode')
    parser.add_argument('-eval_on_val', type=int, default=0)
    # Non-learning baselines
    parser.add_argument('-random_agent', type=int, default=0,
                        help='Agent that randomly selects navigation actions')
    parser.add_argument('-forward_agent', type=int, default=0,
                        help='Agent that always selects action 1')
    parser.add_argument('-shortest_agent', type=int, default=0,
                        help='Optimal shortest-path agent')
    # Perfect language interpretation baseline
    parser.add_argument('-perfect_interpretation', type=int, default=0,
                        help='provide perfect assistance interpretation')
    # Ask baseline
    parser.add_argument('-ask_baseline', type=str, default=None,
                        help='Help-request teacher baseline')
    # Ablation baseline
    parser.add_argument('-no_sim_attend', type=int, default=0,
                        help="No cosine similarity attention (beta = 0)")
    parser.add_argument('-no_reason', type=int, default=0,
                        help="No condition (reason) prediction")
    parser.add_argument('-no_reset_inter', type=int, default=0,
                        help="No reset inter-task module")
    # Single modality baseline
    parser.add_argument('-instruction_baseline', type=str, default=None,
                        help='Instruction type baseline')
    # for dicencoder
    # Bug fix: these options used type=bool, which treats ANY non-empty
    # string (including "False") as True; _str2bool parses them properly.
    # Defaults are unchanged, so existing invocations behave identically.
    parser.add_argument('--encoder_type', dest='encoder_type', default='dic', type=str, help='encoder type')
    parser.add_argument('--d_hidden_size', dest='d_hidden_size', default=256, type=int, help='decoder hidden_size')
    parser.add_argument('--d_ctx_size', dest='d_ctx_size', default=768, type=int, help='ctx hidden_size')
    parser.add_argument('--d_enc_hidden_size', dest='d_enc_hidden_size', default=128, type=int, help='encoder hidden_size')
    parser.add_argument('--d_dropout_ratio', dest='d_dropout_ratio', default=0.4, type=float, help='dropout_ratio')
    parser.add_argument('--d_bidirectional', dest='d_bidirectional', type=_str2bool, default=True, help='bidirectional')
    parser.add_argument('--d_transformer_update', dest='d_transformer_update', type=_str2bool, default=False, help='update Bert')
    parser.add_argument('--d_update_add_layer', dest='d_update_add_layer', type=_str2bool, default=False, help='update fusion layer in Bert')
    parser.add_argument('--d_bert_n_layers', dest='d_bert_n_layers', type=int, default=1, help='bert_n_layers')
    parser.add_argument('--d_reverse_input', dest='d_reverse_input', type=_str2bool, default=True, help='reverse')
    parser.add_argument('--d_top_lstm', dest='d_top_lstm', type=_str2bool, default=True, help='add lstm to the top of transformers')
    parser.add_argument('--d_vl_layers', dest='d_vl_layers', type=int, default=4, help='vl_layers')
    parser.add_argument('--d_la_layers', dest='d_la_layers', type=int, default=9, help='la_layers')
    parser.add_argument('--d_bert_type', dest='d_bert_type', type=str, default="small", help='small or large')
    parser.add_argument('--pretrain_model_name', dest='pretrain_model_name', type=str, default=None, help='the name of pretrained model')
    return parser
|
en
| 0.582385
|
# Meta # Model # Training # Evaluation # Non-learning baselines # Perfect language interpretation baseline # Ask baseline # Ablation baseline # Single modality baseline # for dicencoder
| 2.594331
| 3
|
test.py
|
billzorn/msp-pymodel
| 0
|
6625592
|
<filename>test.py
#!/usr/bin/env python3
import sys
import os
import random
libdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'lib')
sys.path.append(libdir)
from msp_isa import isa
import msp_itable as itab
import msp_fr5969_model as model
import msp_elftools as elftools
scratch_start = 0x1c00
scratch_end = 0x2400
scratch_size = scratch_end - scratch_start
storage_start = 0x4400
storage_end = 0x6000
storage_size = storage_end - storage_start
prog_start = 0x6000
prog_end = 0xf000
prog_size = prog_end - prog_start
# Deterministic "immediate" generator: a monotonically increasing counter is
# used instead of random 16-bit values so runs are reproducible.
r_imm_state = 0


def r_imm():
    """Return the next value of the global immediate counter (1, 2, 3, ...)."""
    global r_imm_state
    r_imm_state = r_imm_state + 1
    return r_imm_state
def r_addr():
    """Return a random address inside the scratch region [start, end)."""
    return random.randrange(scratch_start, scratch_end)
def has_immediate(mode):
    """Return True if *mode* requires an extension (immediate) word.

    Raises ValueError for strings that are not known addressing modes.
    """
    needs_word = {'X(Rn)', 'ADDR', '&ADDR', '#N'}
    no_word = {'Rn', '#1', '@Rn', '@Rn+'}
    if mode in needs_word:
        return True
    if mode in no_word:
        return False
    raise ValueError('not an addressing mode: {:s}'.format(mode))
def uses_addr(mode):
    """Return True if *mode* dereferences a memory address.

    Raises ValueError for strings that are not known addressing modes.
    """
    memory_modes = {'X(Rn)', 'ADDR', '&ADDR', '@Rn', '@Rn+'}
    value_modes = {'Rn', '#1', '#N'}
    if mode in memory_modes:
        return True
    if mode in value_modes:
        return False
    raise ValueError('not an addressing mode: {:s}'.format(mode))
def uses_reg(mode):
    """Return True if *mode* involves a register operand field.

    Raises ValueError for strings that are not known addressing modes.
    """
    register_modes = {'Rn', 'X(Rn)', '@Rn', '@Rn+'}
    registerless_modes = {'ADDR', '&ADDR', '#1', '#N'}
    if mode in register_modes:
        return True
    if mode in registerless_modes:
        return False
    raise ValueError('not an addressing mode: {:s}'.format(mode))
def assemble(fmt, name, smode, dmode, fields):
    """Encode one instruction and return its list of machine words."""
    instruction = isa.modes_to_instr(fmt, name, smode, dmode)
    return isa.inhabitant(instruction, fields)
def mov_iaddr(i, addr):
    """Encode MOV #i, &addr (word-sized immediate-to-absolute move)."""
    fields = {'isrc': i, 'idst': addr, 'bw': 0}
    return assemble('fmt1', 'MOV', '#N', '&ADDR', fields)
def mov_irn(i, rn):
    """Encode MOV #i, Rn (word-sized immediate-to-register move)."""
    fields = {'isrc': i, 'rdst': rn, 'bw': 0}
    return assemble('fmt1', 'MOV', '#N', 'Rn', fields)
# Prepare register contents so the operand resolves to the requested address.
def setup_mode(mode, rn, addr):
    """Return (operand_value, setup_words) for addressing mode *mode*.

    For indexed/indirect modes, emits MOV instructions that load register
    *rn* so the effective address comes out to *addr*; other modes need no
    setup and pass *addr* through unchanged.
    """
    if mode == 'X(Rn)':
        # Split the target address into a register base plus a displacement.
        base = random.randint(0, addr - 1)
        displacement = addr - base
        assert base >= 0 and displacement >= 0 and base + displacement == addr
        return displacement, mov_irn(base, rn)
    if mode in ('@Rn', '@Rn+'):
        # Indirect modes: the register itself holds the address.
        return addr, mov_irn(addr, rn)
    # Every other mode needs no setup code.
    return addr, []
def cleanup_reg_addr(rn, addr):
    """Overwrite register *rn* and memory cell *addr* with fresh immediates."""
    reg_words = mov_irn(r_imm(), rn)
    mem_words = mov_iaddr(r_imm(), addr)
    return reg_words + mem_words
def emit_read_timer(rn):
    """Encode MOV &0x0350, Rn -- snapshot the timer counter into *rn*.

    (0x0350 is the register emit_timer_start() zeroes; presumably the timer
    counter register -- confirm against the FR5969 register map.)
    """
    fields = {'isrc': 0x350, 'rdst': rn, 'bw': 0}
    return assemble('fmt1', 'MOV', '&ADDR', 'Rn', fields)
def emit_compute_store_timer(r1, r2, addr):
    """Encode SUB r1, r2 then MOV.B r2, &addr (store the timing delta byte)."""
    sub_words = assemble('fmt1', 'SUB', 'Rn', 'Rn', {'rsrc': r1, 'rdst': r2, 'bw': 0})
    mov_words = assemble('fmt1', 'MOV', 'Rn', '&ADDR', {'rsrc': r2, 'idst': addr, 'bw': 1})
    return sub_words + mov_words
def emit_disable_watchdog():
    """Write the watchdog-stop value to 0x015c (WDTCTL, presumably -- confirm)."""
    # 0x5a80 == 23168: looks like the WDT password byte (0x5a) plus a hold
    # bit -- verify against the device datasheet.
    return mov_iaddr(0x5a80, 0x015c)
def emit_timer_start():
    """Emit the register writes that configure and start the hardware timer.

    The write order is fixed; the specific register meanings (0x0340-0x0352)
    are presumed timer control/counter/compare registers -- confirm against
    the device register map.
    """
    words = mov_iaddr(16, 0x0342)
    words += mov_iaddr(512, 0x0340)
    words += mov_iaddr(0, 0x0350)      # zero the counter read back by emit_read_timer()
    words += mov_iaddr(50000, 0x352)
    # BIS #16, &0x0340: OR a bit into the control register (the start bit,
    # presumably).
    words += assemble('fmt1', 'BIS', '#N', '&ADDR', {'isrc': 16, 'idst': 0x0340, 'bw': 0})
    return words
def emit_fmt1(name, smode, dmode, rsrc, rdst, old_pc, n=1):
    """Emit n copies of a format-1 (two-operand) instruction with setup/cleanup.

    Args:
        name: mnemonic, e.g. 'MOV'.
        smode, dmode: source/destination addressing modes.
        rsrc, rdst: source/destination register numbers.
        old_pc: address where this code will be placed (needed to fix up
            PC-relative 'ADDR' operands).
        n: number of back-to-back copies of the instruction (default 1).

    Returns:
        List of words: setup code + n encoded instructions + cleanup code.

    Raises:
        ValueError: for constant-generator source registers (r0/r2/r3) used
            with a non-register source mode.
    """
    if rsrc in {0, 2, 3} and smode not in {'Rn'}:
        # Bug fix: the original used '{:s}' on the int rsrc, so .format()
        # itself raised ValueError("Unknown format code 's' ...") and the
        # intended message was never produced.
        raise ValueError('{:s} r{:d}: bad idea'.format(smode, rsrc))
    saddr, ssetup = setup_mode(smode, rsrc, r_addr())
    full_daddr = r_addr()
    daddr, dsetup = setup_mode(dmode, rdst, full_daddr)
    words = ssetup + dsetup
    pc = old_pc + (len(words) * 2)
    ins = isa.modes_to_instr('fmt1', name, smode, dmode)
    # Writing to the PC would need very special handling; unsupported here.
    assert rdst != 0
    ins_words = []
    for _ in range(n):
        new_saddr = saddr
        new_daddr = daddr
        # Fix up operands for PC-relative ('ADDR') addressing.
        offset = -2
        if smode in {'ADDR'}:
            new_saddr = (saddr - pc + offset) & 0xffff
        if has_immediate(smode):
            offset -= 2
        if dmode in {'ADDR'}:
            new_daddr = (daddr - pc + offset) & 0xffff
        # Register-data (Rn) sources just use whatever is already in the
        # register; '#N' needs a fresh immediate each iteration.
        if smode in {'#N'}:
            new_saddr = r_imm()
        fields = {'bw': random.randint(0, 1)}
        if uses_reg(smode):
            fields['rsrc'] = rsrc
        if has_immediate(smode):
            fields['isrc'] = new_saddr
        if uses_reg(dmode):
            fields['rdst'] = rdst
        if has_immediate(dmode):
            fields['idst'] = new_daddr
        ins_words += isa.inhabitant(ins, fields)
        pc += ins.length
    # Cleanup: overwrite the destination register and memory cell with fresh
    # values; we expect to diverge from hardware when e.g. reading the SR.
    cleanup = cleanup_reg_addr(rdst, full_daddr)
    return words + ins_words + cleanup
# Memory images below are word (16-bit) based; sizes are in bytes.
def init_scratch(size):
    """Return a *size*-byte scratch image filled with sequential immediates."""
    word_count = size // 2
    return [r_imm() for _ in range(word_count)]
def init_storage(size):
    """Return a zeroed word image covering *size* bytes."""
    return [0] * (size // 2)
def gen(n=1):
    """Build the complete timing-test program and return the machine model.

    For every format-1 instruction (except DADD) and every supported
    source/destination addressing-mode/register combination, emits:
    timer read, n copies of the instruction, timer read, store of the delta.

    Args:
        n: number of back-to-back copies of each instruction.  Bug fix: this
           used to be a required argument, but __main__ calls ``gen()``; the
           default of 1 makes that call work.

    Returns:
        A model.Model with scratch, storage and program regions populated
        and the reset vector pointing at the program start.
    """
    scratch_words = init_scratch(scratch_size)
    storage_words = init_storage(storage_size)
    trn_1 = 14  # register holding the first timer snapshot
    trn_2 = 15  # register holding the second timer snapshot
    old_pc = prog_start
    store = storage_start
    words = emit_disable_watchdog() + emit_timer_start()
    old_pc += len(words) * 2
    for name in itab.fmt1['instructions']:
        if name not in {'DADD'}:  # decimal add is excluded
            for smode in itab.fmt1['smodes']:
                for dmode in itab.fmt1['dmodes']:
                    for rsrc in [0, 1, 2, 3, 4]:
                        for rdst in [5]:
                            try:
                                pc = old_pc
                                t1_words = emit_read_timer(trn_1)
                                pc += len(t1_words) * 2
                                fmt1_words = emit_fmt1(name, smode, dmode, rsrc, rdst, pc, n=n)
                                pc += len(fmt1_words) * 2
                                t2_words = emit_read_timer(trn_2)
                                store_words = emit_compute_store_timer(trn_1, trn_2, store)
                                pc += (len(t2_words) + len(store_words)) * 2
                            except ValueError:
                                # Invalid mode/register combination: skip it.
                                pass
                            else:
                                store += 1
                                old_pc = pc
                                words += t1_words + fmt1_words + t2_words + store_words
    assert old_pc < prog_end and store < storage_end
    # Populate the model's memory image.
    state = model.Model()
    write16 = model.mk_write16(state.write8)
    for i in range(len(scratch_words)):
        write16(scratch_start + (i * 2), scratch_words[i])
    for i in range(len(storage_words)):
        write16(storage_start + (i * 2), storage_words[i])
    for i in range(len(words)):
        write16(prog_start + (i * 2), words[i])
    # Halt: pad the end of the program with jump-to-self words (0x3fff).
    for i in range(8):
        write16(prog_start + (len(words) * 2) + (i * 2), 0x3fff)
    # Point the reset vector at the program entry.
    write16(model.resetvec, prog_start)
    return state
if __name__ == '__main__':
    # Bug fix: the script used to crash twice here -- sys.argv[1] with no
    # argument raised IndexError, and gen() was called without the required
    # iteration count.  Validate usage and pass an explicit count instead.
    if len(sys.argv) != 2:
        print('usage: {:s} <output-file>'.format(sys.argv[0]))
        sys.exit(1)
    tname = sys.argv[1]
    state = gen(1)
    state.dump()
    elftools.save(state, tname)
|
<filename>test.py
#!/usr/bin/env python3
import sys
import os
import random
libdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'lib')
sys.path.append(libdir)
from msp_isa import isa
import msp_itable as itab
import msp_fr5969_model as model
import msp_elftools as elftools
scratch_start = 0x1c00
scratch_end = 0x2400
scratch_size = scratch_end - scratch_start
storage_start = 0x4400
storage_end = 0x6000
storage_size = storage_end - storage_start
prog_start = 0x6000
prog_end = 0xf000
prog_size = prog_end - prog_start
r_imm_state = 0
def r_imm():
global r_imm_state
r_imm_state += 1
return r_imm_state
#return random.randint(0, 65535)
def r_addr():
return random.randint(scratch_start, scratch_end-1)
def has_immediate(mode):
if mode in {'X(Rn)', 'ADDR', '&ADDR', '#N'}:
return True
elif mode in {'Rn', '#1', '@Rn', '@Rn+'}:
return False
else:
raise ValueError('not an addressing mode: {:s}'.format(mode))
def uses_addr(mode):
if mode in {'X(Rn)', 'ADDR', '&ADDR', '@Rn', '@Rn+'}:
return True
elif mode in {'Rn', '#1', '#N'}:
return False
else:
raise ValueError('not an addressing mode: {:s}'.format(mode))
def uses_reg(mode):
if mode in {'Rn', 'X(Rn)', '@Rn', '@Rn+'}:
return True
elif mode in {'ADDR', '&ADDR', '#1', '#N'}:
return False
else:
raise ValueError('not an addressing mode: {:s}'.format(mode))
def assemble(fmt, name, smode, dmode, fields):
ins = isa.modes_to_instr(fmt, name, smode, dmode)
words = isa.inhabitant(ins, fields)
return words
def mov_iaddr(i, addr):
return assemble('fmt1', 'MOV', '#N', '&ADDR', {'isrc':i, 'idst':addr, 'bw':0})
def mov_irn(i, rn):
return assemble('fmt1', 'MOV', '#N', 'Rn', {'isrc':i, 'rdst':rn, 'bw':0})
# put stuff into the register so that we hit this address
def setup_mode(mode, rn, addr):
if mode in {'X(Rn)'}:
# split address between register and immediate
reg_part = random.randint(0, addr-1)
addr_part = addr - reg_part
assert(reg_part >=0 and addr_part >= 0 and reg_part + addr_part == addr)
return addr_part, mov_irn(reg_part, rn)
elif mode in {'@Rn', '@Rn+'}:
# set the register
return addr, mov_irn(addr, rn)
# # for debugging: show the intended addr
# elif mode in {'ADDR'}:
# return addr, mov_iaddr(0, addr)
else:
return addr, []
def cleanup_reg_addr(rn, addr):
return mov_irn(r_imm(), rn) + mov_iaddr(r_imm(), addr)
def emit_read_timer(rn):
return assemble('fmt1', 'MOV', '&ADDR', 'Rn', {'isrc':0x350, 'rdst':rn, 'bw':0})
def emit_compute_store_timer(r1, r2, addr):
return (assemble('fmt1', 'SUB', 'Rn', 'Rn', {'rsrc':r1, 'rdst':r2, 'bw':0}) +
assemble('fmt1', 'MOV', 'Rn', '&ADDR', {'rsrc':r2, 'idst':addr, 'bw':1}))
def emit_disable_watchdog():
return mov_iaddr(23168, 0x015c)
def emit_timer_start():
return (mov_iaddr(16, 0x0342) +
mov_iaddr(512, 0x0340) +
mov_iaddr(0, 0x0350) +
mov_iaddr(50000, 0x352) +
assemble('fmt1', 'BIS', '#N', '&ADDR', {'isrc':16, 'idst':0x0340, 'bw':0}))
def emit_fmt1(name, smode, dmode, rsrc, rdst, old_pc, n=1):
    """Emit n copies of a format-1 (two-operand) instruction with setup/cleanup.

    Args:
        name: mnemonic, e.g. 'MOV'.
        smode, dmode: source/destination addressing modes.
        rsrc, rdst: source/destination register numbers.
        old_pc: address where this code will be placed (needed to fix up
            PC-relative 'ADDR' operands).
        n: number of back-to-back copies of the instruction (default 1).

    Returns:
        List of words: setup code + n encoded instructions + cleanup code.

    Raises:
        ValueError: for constant-generator source registers (r0/r2/r3) used
            with a non-register source mode.
    """
    if rsrc in {0, 2, 3} and smode not in {'Rn'}:
        # Bug fix: the original used '{:s}' on the int rsrc, so .format()
        # itself raised ValueError("Unknown format code 's' ...") and the
        # intended message was never produced.
        raise ValueError('{:s} r{:d}: bad idea'.format(smode, rsrc))
    saddr, ssetup = setup_mode(smode, rsrc, r_addr())
    full_daddr = r_addr()
    daddr, dsetup = setup_mode(dmode, rdst, full_daddr)
    words = ssetup + dsetup
    pc = old_pc + (len(words) * 2)
    ins = isa.modes_to_instr('fmt1', name, smode, dmode)
    # Writing to the PC would need very special handling; unsupported here.
    assert rdst != 0
    ins_words = []
    for _ in range(n):
        new_saddr = saddr
        new_daddr = daddr
        # Fix up operands for PC-relative ('ADDR') addressing.
        offset = -2
        if smode in {'ADDR'}:
            new_saddr = (saddr - pc + offset) & 0xffff
        if has_immediate(smode):
            offset -= 2
        if dmode in {'ADDR'}:
            new_daddr = (daddr - pc + offset) & 0xffff
        # Register-data (Rn) sources just use whatever is already in the
        # register; '#N' needs a fresh immediate each iteration.
        if smode in {'#N'}:
            new_saddr = r_imm()
        fields = {'bw': random.randint(0, 1)}
        if uses_reg(smode):
            fields['rsrc'] = rsrc
        if has_immediate(smode):
            fields['isrc'] = new_saddr
        if uses_reg(dmode):
            fields['rdst'] = rdst
        if has_immediate(dmode):
            fields['idst'] = new_daddr
        ins_words += isa.inhabitant(ins, fields)
        pc += ins.length
    # Cleanup: overwrite the destination register and memory cell with fresh
    # values; we expect to diverge from hardware when e.g. reading the SR.
    cleanup = cleanup_reg_addr(rdst, full_daddr)
    return words + ins_words + cleanup
# word based
def init_scratch(size):
return [r_imm() for _ in range(size // 2)]
def init_storage(size):
return [0 for _ in range(size // 2)]
def gen(n=1):
    """Build the complete timing-test program and return the machine model.

    For every format-1 instruction (except DADD) and every supported
    source/destination addressing-mode/register combination, emits:
    timer read, n copies of the instruction, timer read, store of the delta.

    Args:
        n: number of back-to-back copies of each instruction.  Bug fix: this
           used to be a required argument, but __main__ calls ``gen()``; the
           default of 1 makes that call work.

    Returns:
        A model.Model with scratch, storage and program regions populated
        and the reset vector pointing at the program start.
    """
    scratch_words = init_scratch(scratch_size)
    storage_words = init_storage(storage_size)
    trn_1 = 14  # register holding the first timer snapshot
    trn_2 = 15  # register holding the second timer snapshot
    old_pc = prog_start
    store = storage_start
    words = emit_disable_watchdog() + emit_timer_start()
    old_pc += len(words) * 2
    for name in itab.fmt1['instructions']:
        if name not in {'DADD'}:  # decimal add is excluded
            for smode in itab.fmt1['smodes']:
                for dmode in itab.fmt1['dmodes']:
                    for rsrc in [0, 1, 2, 3, 4]:
                        for rdst in [5]:
                            try:
                                pc = old_pc
                                t1_words = emit_read_timer(trn_1)
                                pc += len(t1_words) * 2
                                fmt1_words = emit_fmt1(name, smode, dmode, rsrc, rdst, pc, n=n)
                                pc += len(fmt1_words) * 2
                                t2_words = emit_read_timer(trn_2)
                                store_words = emit_compute_store_timer(trn_1, trn_2, store)
                                pc += (len(t2_words) + len(store_words)) * 2
                            except ValueError:
                                # Invalid mode/register combination: skip it.
                                pass
                            else:
                                store += 1
                                old_pc = pc
                                words += t1_words + fmt1_words + t2_words + store_words
    assert old_pc < prog_end and store < storage_end
    # Populate the model's memory image.
    state = model.Model()
    write16 = model.mk_write16(state.write8)
    for i in range(len(scratch_words)):
        write16(scratch_start + (i * 2), scratch_words[i])
    for i in range(len(storage_words)):
        write16(storage_start + (i * 2), storage_words[i])
    for i in range(len(words)):
        write16(prog_start + (i * 2), words[i])
    # Halt: pad the end of the program with jump-to-self words (0x3fff).
    for i in range(8):
        write16(prog_start + (len(words) * 2) + (i * 2), 0x3fff)
    # Point the reset vector at the program entry.
    write16(model.resetvec, prog_start)
    return state
if __name__ == '__main__':
    # Bug fix: the script used to crash twice here -- sys.argv[1] with no
    # argument raised IndexError, and gen() was called without the required
    # iteration count.  Validate usage and pass an explicit count instead.
    if len(sys.argv) != 2:
        print('usage: {:s} <output-file>'.format(sys.argv[0]))
        sys.exit(1)
    tname = sys.argv[1]
    state = gen(1)
    state.dump()
    elftools.save(state, tname)
|
en
| 0.68824
|
#!/usr/bin/env python3 #return random.randint(0, 65535) # put stuff into the register so that we hit this address # split address between register and immediate # set the register # # for debugging: show the intended addr # elif mode in {'ADDR'}: # return addr, mov_iaddr(0, addr) # need to do very special things for the pc # fix addresses for pc-relative addressing #print('{:s}: {:x}, {:x}, {:x}'.format(ins.name, pc, saddr, daddr)) # note that we haven't done anything to change the value that starts # in the register if it's data (Rn mode), we'll just use whatever's there and # it's fine # we do need to choose a random immediate for #N mode though # bad variable names, ehh # print() # print(hex(old_pc)) # print(smode, dmode, rsrc, rdst) # ins.describe() # print(repr(fields)) # print([hex(w) for w in ins_words]) # cleanup, we expect to diverge from the physical device in situations where we're reading the SR, etc... # word based #for name in ['ADD']: #for dmode in ['Rn', 'X(Rn)', '&ADDR']: #print(e) # create the model # halt # reset # for i in range(len(isa.ids_ins)): # ins = isa.idx_to_instr(i) # modes = isa.idx_to_modes(i) # print(modes) # print(ins.live_fields()) # fields = {'rsrc':7, 'rdst':15, 'bw':0} # for smode in itab.fmt1['smodes']: # for dmode in itab.fmt1['dmodes']: # print(smode, dmode) # ins = isa.modes_to_instr('fmt1', 'CMP', smode, dmode) # ins.describe() # f = fields.copy() # if has_immediate(smode): # f['isrc'] = 17777 # if has_immediate(dmode): # f['idst'] = 0x4400 # print(f) # words = isa.inhabitant(ins, f) # print(ins.live_fields()) # print(words)
| 1.996692
| 2
|
deepem/test/run.py
|
ZettaAI/DeepEM
| 0
|
6625593
|
import os
import torch
from deepem.test.forward import Forward
from deepem.test.option import Options
from deepem.test.utils import *
def test(opt):
    """Run inference with the configured model and save outputs.

    Args:
        opt: parsed options namespace (see deepem.test.option.Options).
    """
    # Load the model described by the options.
    model = load_model(opt)
    # Forward-scan driver.
    forward = Forward(opt)
    if opt.gs_input:
        # Single configured input (gs_ prefix suggests Google Storage -- confirm).
        scanner = make_forward_scanner(opt)
        output, aug_out = forward(model, scanner)
        save_output(output, opt, aug_out=aug_out)
    else:
        # One forward scan per named dataset.
        for dname in opt.data_names:
            scanner = make_forward_scanner(opt, data_name=dname)
            output, _ = forward(model, scanner)
            save_output(output, opt, data_name=dname)
if __name__ == "__main__":
    # Parse command-line options.
    opt = Options().parse()
    # GPU selection (skipped in CPU mode) and cuDNN autotuning.
    if not opt.cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_id
    torch.backends.cudnn.benchmark = not opt.no_autotune
    # Make directories.
    os.makedirs(opt.exp_dir, exist_ok=True)
    os.makedirs(opt.model_dir, exist_ok=True)
    # Run inference.
    print(f"Running inference: {opt.exp_name}")
    test(opt)
|
import os
import torch
from deepem.test.forward import Forward
from deepem.test.option import Options
from deepem.test.utils import *
def test(opt):
    """Run inference with the configured model and save outputs.

    Args:
        opt: parsed options namespace (see deepem.test.option.Options).
    """
    # Load the model described by the options.
    model = load_model(opt)
    # Forward-scan driver.
    forward = Forward(opt)
    if opt.gs_input:
        # Single configured input (gs_ prefix suggests Google Storage -- confirm).
        scanner = make_forward_scanner(opt)
        output, aug_out = forward(model, scanner)
        save_output(output, opt, aug_out=aug_out)
    else:
        # One forward scan per named dataset.
        for dname in opt.data_names:
            scanner = make_forward_scanner(opt, data_name=dname)
            output, _ = forward(model, scanner)
            save_output(output, opt, data_name=dname)
if __name__ == "__main__":
    # Parse command-line options.
    opt = Options().parse()
    # GPU selection (skipped in CPU mode) and cuDNN autotuning.
    if not opt.cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_id
    torch.backends.cudnn.benchmark = not opt.no_autotune
    # Make directories.
    os.makedirs(opt.exp_dir, exist_ok=True)
    os.makedirs(opt.model_dir, exist_ok=True)
    # Run inference.
    print(f"Running inference: {opt.exp_name}")
    test(opt)
|
en
| 0.704378
|
# Model # Forward scan # Options # GPU # Make directories. # Run inference.
| 2.189538
| 2
|
2_Fun_Python/Add_Notes.py
|
CyberThulhu22/Python-Projects
| 0
|
6625594
|
#!/usr/bin/env python3 -tt
#-*- Coding: utf-8 -*-
"""
NAME: Add_Notes.py
VERSION: 1.0
AUTHOR: <NAME> (CyberThulhu)
STATUS: Building Initial code framework
DESCRIPTION: Allows a User to Add Notes to a '.txt' Document
TO-DO:
USAGE: Add_Notes.py [-h] -o <output_file.txt> [-t]
COPYRIGHT © 2021 <NAME>
"""
# Imports
import sys
import argparse
import datetime
from os.path import exists, abspath
# Instantiate Parser
PROG_NAME = "Add_Notes"
DESC_TEXT = "Allows a User to Add Notes to a '.txt' Document"
EPIL_TEXT = "COPYRIGHT © 2021 <NAME>"
# Fix: actually use the constants defined above -- the parser was built with
# empty/placeholder strings while PROG_NAME/DESC_TEXT/EPIL_TEXT went unused.
parser = argparse.ArgumentParser(prog=PROG_NAME, description=DESC_TEXT, epilog=EPIL_TEXT)
# Add Parser Arguments
OUTP_HELP_TEXT = 'File path to Write/Append Output'
TIME_HELP_TEXT = 'Option to Timestamp Newlines (Default: False)'
# -o is mandatory per the documented usage string; without it main() would
# crash on open(None).
parser.add_argument('-o', dest='output', metavar=r'outfile.txt', type=str, required=True, help=OUTP_HELP_TEXT)
# Fix: action='store_true' does not accept a type= keyword; passing
# type=bool raised TypeError at import time, so the script never ran.
parser.add_argument('-t', dest='timestamp', default=False, action='store_true', help=TIME_HELP_TEXT)
# Parse through created Arguments
pargs = parser.parse_args()
# Defined Functions
def check_file_exists():
    """TO-DO stub: presumably meant to verify the output file exists (note
    the unused `exists` import at the top of the file).  Not called anywhere
    in this file."""
    pass
def timestamp_line() -> str:
    """Return the current local time formatted as HH:MM:SS."""
    now = datetime.datetime.now()
    return now.strftime("%H:%M:%S")
def main() -> None:
    """Read notes from stdin in a loop and append each to the output file.

    Stops on Ctrl-C (KeyboardInterrupt) or end of input (EOFError), then
    exits with status 0.
    """
    while True:
        try:
            newline = input("Enter Note: ")  # fix: add prompt separator
            with open(pargs.output, "a") as file:
                if pargs.timestamp:
                    # Prefix the note with an HH:MM:SS timestamp.
                    file.write(timestamp_line() + " " + newline + "\n")
                else:
                    # Fix: write a plain "\n" -- the old "\n\r" is not a
                    # valid line ending on any platform.
                    file.write(newline + "\n")
        except (KeyboardInterrupt, EOFError):
            # Fix: piped/redirected stdin raises EOFError at end of input;
            # treat it like Ctrl-C instead of crashing.
            break
    sys.exit(0)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3 -tt
#-*- Coding: utf-8 -*-
"""
NAME: Add_Notes.py
VERSION: 1.0
AUTHOR: <NAME> (CyberThulhu)
STATUS: Building Initial code framework
DESCRIPTION: Allows a User to Add Notes to a '.txt' Document
TO-DO:
USAGE: Add_Notes.py [-h] -o <output_file.txt> [-t]
COPYRIGHT © 2021 <NAME>
"""
# Imports
import sys
import argparse
import datetime
from os.path import exists, abspath
# Instantiate Parser
PROG_NAME = "Add_Notes"
DESC_TEXT = "Allows a User to Add Notes to a '.txt' Document"
EPIL_TEXT = "COPYRIGHT © 2021 <NAME>"
# Fix: actually use the constants defined above -- the parser was built with
# empty/placeholder strings while PROG_NAME/DESC_TEXT/EPIL_TEXT went unused.
parser = argparse.ArgumentParser(prog=PROG_NAME, description=DESC_TEXT, epilog=EPIL_TEXT)
# Add Parser Arguments
OUTP_HELP_TEXT = 'File path to Write/Append Output'
TIME_HELP_TEXT = 'Option to Timestamp Newlines (Default: False)'
# -o is mandatory per the documented usage string; without it main() would
# crash on open(None).
parser.add_argument('-o', dest='output', metavar=r'outfile.txt', type=str, required=True, help=OUTP_HELP_TEXT)
# Fix: action='store_true' does not accept a type= keyword; passing
# type=bool raised TypeError at import time, so the script never ran.
parser.add_argument('-t', dest='timestamp', default=False, action='store_true', help=TIME_HELP_TEXT)
# Parse through created Arguments
pargs = parser.parse_args()
# Defined Functions
def check_file_exists():
pass
def timestamp_line() -> str:
""" Returns Time String """
return "{}".format(datetime.datetime.now().strftime("%H:%M:%S"))
def main() -> None:
    """Read notes from stdin in a loop and append each to the output file.

    Stops on Ctrl-C (KeyboardInterrupt) or end of input (EOFError), then
    exits with status 0.
    """
    while True:
        try:
            newline = input("Enter Note: ")  # fix: add prompt separator
            with open(pargs.output, "a") as file:
                if pargs.timestamp:
                    # Prefix the note with an HH:MM:SS timestamp.
                    file.write(timestamp_line() + " " + newline + "\n")
                else:
                    # Fix: write a plain "\n" -- the old "\n\r" is not a
                    # valid line ending on any platform.
                    file.write(newline + "\n")
        except (KeyboardInterrupt, EOFError):
            # Fix: piped/redirected stdin raises EOFError at end of input;
            # treat it like Ctrl-C instead of crashing.
            break
    sys.exit(0)
if __name__ == "__main__":
main()
|
en
| 0.508412
|
#!/usr/bin/env python3 -tt #-*- Coding: utf-8 -*- NAME: Add_Notes.py VERSION: 1.0 AUTHOR: <NAME> (CyberThulhu) STATUS: Building Initial code framework DESCRIPTION: Allows a User to Add Notes to a '.txt' Document TO-DO: USAGE: Add_Notes.py [-h] -o <output_file.txt> [-t] COPYRIGHT © 2021 <NAME> # Imports # Instantiate Parser # Add Parser Arguments # Parse through created Arguments # Defined Functions Returns Time String Main Function
| 3.490396
| 3
|
scripts/release/main.py
|
Naios/lvgl
| 593
|
6625595
|
<reponame>Naios/lvgl
#!/usr/bin/env python
import os.path
from os import path
from datetime import date
import sys
import com
import release
import dev
import proj
# Upstream organisation URL, temporary working directory, and the simulator
# projects that get a release of their own.
upstream_org_url = "https://github.com/lvgl/"
workdir = "./release_tmp"
proj_list = ["lv_sim_eclipse_sdl", "lv_sim_emscripten"]


def upstream(repo):
    """Return the git clone URL of *repo* under the upstream organisation."""
    return "{}{}.git".format(upstream_org_url, repo)
def clone(repo):
    """Clone *repo* (with submodules) into the current directory, check out
    master, and sync remote refs/tags.  Leaves the CWD where it started."""
    com.cmd("git clone --recurse-submodules " + upstream(repo))
    os.chdir("./" + repo)
    com.cmd("git checkout master")
    com.cmd("git remote update origin --prune")
    com.cmd("git pull origin --tags")
    os.chdir("..")
def clone_repos():
    """Recreate the temporary working directory and clone every release repo.

    Side effect: changes the CWD into *workdir* and stays there."""
    com.cmd("rm -fr " + workdir)
    com.cmd("mkdir " + workdir)
    os.chdir(workdir)
    clone("lvgl")
    clone("lv_examples")
    clone("lv_drivers")
    clone("docs")
    clone("blog")
    # Simulator/example projects as well.
    for p in proj_list:
        clone(p)
def cleanup():
    """Leave and delete the temporary working directory."""
    os.chdir("../")
    com.cmd("rm -fr " + workdir)
if __name__ == '__main__':
    # Valid release types; dev_prepare is currently hard-coded to 'bugfix'
    # (the argv-based selection below is commented out, so the validity check
    # can never actually fail).
    prepare_type = ['major', 'minor', 'bugfix']
    dev_prepare = 'bugfix'
    # if(len(sys.argv) != 2):
    #     print("Missing argument. Usage ./release.py bugfix | minor | major")
    #     print("Use minor by default")
    # else:
    #     dev_prepare = sys.argv[1]
    if not (dev_prepare in prepare_type):
        print("Invalid argument. Usage ./release.py bugfix | minor | major")
        exit(1)
    # Fetch everything, cut the release, then build each simulator project
    # and prepare the next dev cycle.
    clone_repos()
    release.make()
    os.chdir(workdir)
    for p in proj_list:
        proj.make(p, True)
    dev.make(dev_prepare)
    #cleanup()
|
#!/usr/bin/env python
import os.path
from os import path
from datetime import date
import sys
import com
import release
import dev
import proj
upstream_org_url = "https://github.com/lvgl/"
workdir = "./release_tmp"
proj_list = [ "lv_sim_eclipse_sdl", "lv_sim_emscripten"]
def upstream(repo):
return upstream_org_url + repo + ".git"
def clone(repo):
com.cmd("git clone --recurse-submodules " + upstream(repo))
os.chdir("./" + repo)
com.cmd("git checkout master")
com.cmd("git remote update origin --prune")
com.cmd("git pull origin --tags")
os.chdir("..")
def clone_repos():
com.cmd("rm -fr " + workdir)
com.cmd("mkdir " + workdir)
os.chdir(workdir)
clone("lvgl")
clone("lv_examples")
clone("lv_drivers")
clone("docs")
clone("blog")
for p in proj_list:
clone(p)
def cleanup():
os.chdir("../")
com.cmd("rm -fr " + workdir)
if __name__ == '__main__':
prepare_type = ['major', 'minor', 'bugfix']
dev_prepare = 'bugfix'
# if(len(sys.argv) != 2):
# print("Missing argument. Usage ./release.py bugfix | minor | major")
# print("Use minor by deafult")
# else:
# dev_prepare = sys.argv[1]
if not (dev_prepare in prepare_type):
print("Invalid argument. Usage ./release.py bugfix | minor | major")
exit(1)
clone_repos()
release.make()
os.chdir(workdir)
for p in proj_list:
proj.make(p, True)
dev.make(dev_prepare)
#cleanup()
|
en
| 0.267031
|
#!/usr/bin/env python # if(len(sys.argv) != 2): # print("Missing argument. Usage ./release.py bugfix | minor | major") # print("Use minor by deafult") # else: # dev_prepare = sys.argv[1] #cleanup()
| 2.227901
| 2
|
main_eval.py
|
ajbondmk/IntelligentNavigationOfTextBasedGames
| 1
|
6625596
|
<reponame>ajbondmk/IntelligentNavigationOfTextBasedGames
# Ad-hoc evaluation driver: runs a zero-shot evaluation of the DQN agent
# with placeholder argument values ("tmp", "tmp").
from run_agents import dqn_agent_eval_zero_shot
dqn_agent_eval_zero_shot("tmp", "tmp")
# TODO: DELETE THIS FILE
|
from run_agents import dqn_agent_eval_zero_shot
dqn_agent_eval_zero_shot("tmp", "tmp")
# TODO: DELETE THIS FILE
|
en
| 0.219271
|
# TODO: DELETE THIS FILE
| 1.228854
| 1
|
scons/wii.py
|
marstau/shinsango
| 1
|
6625597
|
import os
import utils
from SCons.Environment import Environment
from SCons.Script import Exit
def wii(env):
    """Configure *env* for a devkitPPC/libogc Wii cross-build and return it."""
    # Toolchain locations taken from the devkitPro environment variables.
    bin_path = "%s/bin" % os.environ['DEVKITPPC']
    ogc_bin_path = "%s/libogc/bin" % os.environ['DEVKITPRO']
    prefix = 'powerpc-eabi-'
    def setup(x):
        # Prefix a tool name with the cross-compiler triplet.
        return '%s%s' % (prefix, x)
    env['CC'] = setup('gcc')
    env['LD'] = setup('ld')
    env['CXX'] = setup('g++')
    env['AS'] = setup('as')
    env['AR'] = setup('ar')
    env['OBJCOPY'] = setup('objcopy')
    if utils.isWindows():
        # On Windows the SDL/freetype headers and libs are taken from the
        # libogc tree directly instead of the *-config scripts below.
        env.Append(CPPPATH = ["%s/libogc/include" % os.environ['DEVKITPRO'],
                              "%s/libogc/include/SDL" % os.environ['DEVKITPRO'],
                              "%s/libogc/include/freetype2" % os.environ['DEVKITPRO']])
        env.Append(LIBPATH = ["%s/libogc/lib" % os.environ['DEVKITPRO'],
                              "%s/libogc/lib/wii" % os.environ['DEVKITPRO']])
        env.Append(LIBS = ['SDL', 'SDL_image', 'SDL_mixer', 'png', 'freetype', 'z'])
    else:
        env.Append(CPPPATH = ["%s/libogc/include" % os.environ['DEVKITPRO']])
    env.Append(CPPDEFINES = ['GEKKO', 'WII'])
    # PowerPC/Wii code-generation flags, applied to compile and link alike.
    flags = ['-mrvl', '-mcpu=750', '-meabi', '-mhard-float']
    env.Append(CCFLAGS = flags)
    env.Append(CXXFLAGS = flags)
    env.Append(LINKFLAGS = flags)
    # env.Append(CPPPATH = ['#src/wii'])
    # --start-group/--end-group lets mutually-dependent static libs resolve.
    env['LINKCOM'] = '$CXX $LINKFLAGS -Wl,--start-group $ARCHIVES $SOURCES $_LIBDIRFLAGS $_LIBFLAGS -Wl,--end-group -o $TARGET'
    env.Append(LIBS = ['wiiuse', 'wiikeyboard', 'iberty', 'bte', 'fat', 'ogc', 'm'])
    # os.environ['PATH'] = "%s:%s:%s" % (bin_path, ogc_bin_path, os.environ['PATH'])
    # Put the cross tools ahead of the host tools on PATH.
    env.PrependENVPath('PATH', bin_path)
    env.PrependENVPath('PATH', ogc_bin_path)
    env.Append(CPPDEFINES = ['USE_SDL_MAIN'])
    # Pull compiler/linker settings from the cross sdl/freetype/libpng configs.
    utils.safeParseConfig(env, 'sdl-config --cflags --libs')
    utils.safeParseConfig(env, 'freetype-config --libs --cflags')
    utils.safeParseConfig(env, 'libpng-config --libs --cflags')
    return env
def checkPython(context):
    """Custom SCons configure check: always report 0 (Python unavailable)."""
    result = 0
    context.Result(result)
    return result
def getEnvironment():
    """Build and return the SCons Environment configured for the Wii port."""
    import utils
    environment = Environment(ENV = os.environ)
    environment['PAINTOWN_PLATFORM'] = ['wii', 'sdl']
    peg_color = 'light-cyan'
    environment['PAINTOWN_BACKEND'] = 'sdl'
    environment['PAINTOWN_USE_PRX'] = False
    # Register the custom configure check defined above.
    environment['PAINTOWN_TESTS'] = {'CheckPython': checkPython}
    environment['PAINTOWN_COLORIZE'] = utils.colorize
    environment['PAINTOWN_NETWORKING'] = False
    environment['LIBS'] = []
    # Pretty progress message for the peg-parser build step.
    environment['PEG_MAKE'] = "%s %s" % (utils.colorize('Creating peg parser', peg_color), utils.colorize('$TARGET', 'light-blue'))
    environment.Append(BUILDERS = {'Peg' : utils.pegBuilder(environment)})
    environment.Append(CPPPATH = ['#src', '#src/util/network/hawknl'])
    environment.Append(CPPDEFINES = ['USE_SDL'])
    # Finish by layering the Wii cross-build configuration on top.
    return utils.lessVerbose(wii(environment))
|
import os
import utils
from SCons.Environment import Environment
from SCons.Script import Exit
def wii(env):
bin_path = "%s/bin" % os.environ['DEVKITPPC']
ogc_bin_path = "%s/libogc/bin" % os.environ['DEVKITPRO']
prefix = 'powerpc-eabi-'
def setup(x):
return '%s%s' % (prefix, x)
env['CC'] = setup('gcc')
env['LD'] = setup('ld')
env['CXX'] = setup('g++')
env['AS'] = setup('as')
env['AR'] = setup('ar')
env['OBJCOPY'] = setup('objcopy')
if utils.isWindows():
env.Append(CPPPATH = ["%s/libogc/include" % os.environ['DEVKITPRO'],
"%s/libogc/include/SDL" % os.environ['DEVKITPRO'],
"%s/libogc/include/freetype2" % os.environ['DEVKITPRO']])
env.Append(LIBPATH = ["%s/libogc/lib" % os.environ['DEVKITPRO'],
"%s/libogc/lib/wii" % os.environ['DEVKITPRO']])
env.Append(LIBS = ['SDL', 'SDL_image', 'SDL_mixer', 'png', 'freetype', 'z'])
else:
env.Append(CPPPATH = ["%s/libogc/include" % os.environ['DEVKITPRO']])
env.Append(CPPDEFINES = ['GEKKO', 'WII'])
flags = ['-mrvl', '-mcpu=750', '-meabi', '-mhard-float']
env.Append(CCFLAGS = flags)
env.Append(CXXFLAGS = flags)
env.Append(LINKFLAGS = flags)
# env.Append(CPPPATH = ['#src/wii'])
env['LINKCOM'] = '$CXX $LINKFLAGS -Wl,--start-group $ARCHIVES $SOURCES $_LIBDIRFLAGS $_LIBFLAGS -Wl,--end-group -o $TARGET'
env.Append(LIBS = ['wiiuse', 'wiikeyboard', 'iberty', 'bte', 'fat', 'ogc', 'm'])
# os.environ['PATH'] = "%s:%s:%s" % (bin_path, ogc_bin_path, os.environ['PATH'])
env.PrependENVPath('PATH', bin_path)
env.PrependENVPath('PATH', ogc_bin_path)
env.Append(CPPDEFINES = ['USE_SDL_MAIN'])
utils.safeParseConfig(env, 'sdl-config --cflags --libs')
utils.safeParseConfig(env, 'freetype-config --libs --cflags')
utils.safeParseConfig(env, 'libpng-config --libs --cflags')
return env
def checkPython(context):
context.Result(0)
return 0
def getEnvironment():
import utils
environment = Environment(ENV = os.environ)
environment['PAINTOWN_PLATFORM'] = ['wii', 'sdl']
peg_color = 'light-cyan'
environment['PAINTOWN_BACKEND'] = 'sdl'
environment['PAINTOWN_USE_PRX'] = False
environment['PAINTOWN_TESTS'] = {'CheckPython': checkPython}
environment['PAINTOWN_COLORIZE'] = utils.colorize
environment['PAINTOWN_NETWORKING'] = False
environment['LIBS'] = []
environment['PEG_MAKE'] = "%s %s" % (utils.colorize('Creating peg parser', peg_color), utils.colorize('$TARGET', 'light-blue'))
environment.Append(BUILDERS = {'Peg' : utils.pegBuilder(environment)})
environment.Append(CPPPATH = ['#src', '#src/util/network/hawknl'])
environment.Append(CPPDEFINES = ['USE_SDL'])
return utils.lessVerbose(wii(environment))
|
en
| 0.311406
|
# env.Append(CPPPATH = ['#src/wii']) # os.environ['PATH'] = "%s:%s:%s" % (bin_path, ogc_bin_path, os.environ['PATH'])
| 2.263215
| 2
|
asst2_hil00/factorials.py
|
hibalubbad/hiba.baddie
| 0
|
6625598
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 18 15:49:04 2018
@author: misskeisha
"""
def factorial(n):
    """Return n! for a non-negative integer n (0! == 1).

    Raises:
        ValueError: if n is negative.  The original loop silently
        returned 1 for negative input because range(n, 0, -1) is empty,
        which is mathematically wrong.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    a = 1
    # Multiply n, n-1, ..., 1; an empty range leaves a == 1 for n == 0.
    for i in range(n, 0, -1):
        a = a * i
    return a
# Print the factorials of the odd numbers below 10 (1, 3, 5, 7, 9).
for odd in range(1, 10, 2):
    print(factorial(odd))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 18 15:49:04 2018
@author: misskeisha
"""
def factorial(n):
a = 1
for i in range(n,0, -1):
a = a *i
return a
for i in range(10):
if i%2 != 0:
print(factorial(i))
|
en
| 0.581022
|
#!/usr/bin/env python3 # -*- coding: utf-8 -*- Created on Tue Sep 18 15:49:04 2018 @author: misskeisha
| 4.154957
| 4
|
tweetcatcher/catcher.py
|
Vanclief/tweet-catcher
| 5
|
6625599
|
<reponame>Vanclief/tweet-catcher
import yaml
from influxdb import InfluxDBClient
from tweetcatcher.client import Client
from tweepy import OAuthHandler
class Catcher(object):
    """Wires config.yml together into an InfluxDB client and Twitter
    OAuth handler, then runs a tweet-catching Client forever."""

    def __init__(self):
        with open("config.yml", 'r') as config_file:
            # Security fix: yaml.load without an explicit Loader can
            # construct arbitrary Python objects from the document;
            # safe_load restricts parsing to plain data types.
            config = yaml.safe_load(config_file)
        self.db_client = self._create_db_client(config)
        self.twitter_auth = self._create_twitter_auth(config)

    def _create_db_client(self, config):
        """Init a new InfluxDB client from the 'database' config section."""
        host = config['database']['host']
        port = config['database']['port']
        user = config['database']['user']
        password = config['database']['password']
        db_name = config['database']['name']
        return InfluxDBClient(host, port, user, password, db_name)

    def _create_twitter_auth(self, config):
        """Create a Twitter OAuth handler from the 'twitter_api' section."""
        # Get twitter api configuration
        consumer_key = config['twitter_api']['consumer_key']
        consumer_secret = config['twitter_api']['consumer_secret']
        access_token = config['twitter_api']['access_token']
        access_token_secret = config['twitter_api']['access_token_secret']
        auth = OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        return auth

    def run(self):
        """Start catching tweets, restarting the client on failure.

        Bug fix: the original restarted via ``self.run()`` inside a bare
        ``except``, which (a) grows the call stack without bound on
        repeated failures and (b) swallows KeyboardInterrupt/SystemExit.
        A loop with a narrowed ``except Exception`` fixes both while
        preserving the recreate-client-and-retry behavior.
        """
        while True:
            print("Creating new Twitter client")
            client = Client(self.twitter_auth, self.db_client)
            try:
                client.run()
            except Exception:
                continue
# Script entry point: build a Catcher from config.yml and run forever.
if __name__ == '__main__':
    CATCHER = Catcher()
    CATCHER.run()
|
import yaml
from influxdb import InfluxDBClient
from tweetcatcher.client import Client
from tweepy import OAuthHandler
class Catcher(object):
def __init__(self):
with open("config.yml", 'r') as config_file:
config = yaml.load(config_file)
self.db_client = self._create_db_client(config)
self.twitter_auth = self._create_twitter_auth(config)
def _create_db_client(self, config):
""" Init a new InfluxDB client """
host = config['database']['host']
port = config['database']['port']
user = config['database']['user']
password = config['database']['password']
db_name = config['database']['name']
return InfluxDBClient(host, port, user, password, db_name)
def _create_twitter_auth(self, config):
""" Create a new Auth for Twitter """
# Get twitter api configuration
consumer_key = config['twitter_api']['consumer_key']
consumer_secret = config['twitter_api']['consumer_secret']
access_token = config['twitter_api']['access_token']
access_token_secret = config['twitter_api']['access_token_secret']
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
return auth
def run(self):
""" Start Catching tweets"""
print("Creating new Twitter client")
client = Client(self.twitter_auth, self.db_client)
try:
client.run()
except:
self.run()
if __name__ == '__main__':
CATCHER = Catcher()
CATCHER.run()
|
en
| 0.388487
|
Init a new InfluxDB client Create a new Auth for Twitter # Get twitter api configuration Start Catching tweets
| 2.636281
| 3
|
Introduccion SNMP/5-AdministracionDeRendimiento/Parte1-LineaBase/getSNMP.py
|
NacxitCotuha/ASR-2022-4CM13
| 0
|
6625600
|
<reponame>NacxitCotuha/ASR-2022-4CM13<filename>Introduccion SNMP/5-AdministracionDeRendimiento/Parte1-LineaBase/getSNMP.py<gh_stars>0
from pysnmp.hlapi import *
def consultarSNMP( comunidad: str, host: str, oid: str ):
    """Perform a single SNMPv2c GET of ``oid`` against ``host`` (UDP 161)
    using community string ``comunidad``.

    Returns the value portion of the first varbind as a string, or
    (implicitly) None when the request failed — errors are only printed,
    never raised.
    """
    # getCmd is a generator; next() fires exactly one GET round-trip.
    error_indication, error_status, error_index, var_binds = next(
        getCmd(
            SnmpEngine(),
            CommunityData(comunidad),
            UdpTransportTarget((host, 161)),
            ContextData(),
            ObjectType(ObjectIdentity(oid))
        )
    )
    if error_indication:
        # Transport-level failure (timeout, unreachable host, ...).
        print(error_indication)
    elif error_status:
        # SNMP protocol error; point at the offending varbind when known.
        print('%s at %s' % (error_status.prettyPrint(), error_index and var_binds[int(error_index) - 1][0] or '?'))
    else:
        for var_bind in var_binds:
            # Rendered form is "OID = value"; split()[2] grabs the value.
            # NOTE(review): the return sits inside the loop, so only the
            # first varbind is ever considered, and a value containing
            # spaces would be truncated — confirm this is intended.
            var_b = (' = '.join([x.prettyPrint() for x in var_bind]))
            resultado = var_b.split()[2]
            return resultado
|
SNMP/5-AdministracionDeRendimiento/Parte1-LineaBase/getSNMP.py<gh_stars>0
from pysnmp.hlapi import *
def consultarSNMP( comunidad: str, host: str, oid: str ):
error_indication, error_status, error_index, var_binds = next(
getCmd(
SnmpEngine(),
CommunityData(comunidad),
UdpTransportTarget((host, 161)),
ContextData(),
ObjectType(ObjectIdentity(oid))
)
)
if error_indication:
print(error_indication)
elif error_status:
print('%s at %s' % (error_status.prettyPrint(), error_index and var_binds[int(error_index) - 1][0] or '?'))
else:
for var_bind in var_binds:
var_b = (' = '.join([x.prettyPrint() for x in var_bind]))
resultado = var_b.split()[2]
return resultado
|
none
| 1
| 2.35558
| 2
|
|
Dataprocess/Criteo/config.py
|
markWJJ/AutoInt
| 975
|
6625601
|
# Directory holding the Criteo dataset files.
DATA_PATH = './Criteo/'
# Raw training-example file consumed by the preprocessing scripts.
SOURCE_DATA = './train_examples.txt'
|
DATA_PATH = './Criteo/'
SOURCE_DATA = './train_examples.txt'
|
none
| 1
| 1.026984
| 1
|
|
src/txamqp/xmlutil.py
|
sbraz/txamqp
| 17
|
6625602
|
<gh_stars>10-100
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
XML utilities used by spec.py
"""
import xml.sax
from xml.sax.handler import ContentHandler
def parse(file):
    """Parse XML from a file name or stream into a Node tree hanging
    under a synthetic "root" node."""
    root = Node("root")
    handler = Builder(root)
    xml.sax.parse(file, handler)
    return root
def parse_string(string):
    """Parse XML from an in-memory string into a Node tree hanging
    under a synthetic "root" node."""
    root = Node("root")
    handler = Builder(root)
    xml.sax.parseString(string, handler)
    return root
class Node(object):
    """One element of the parsed XML tree.

    A child links itself to its parent at construction time, so creating
    Node(..., parent=p) also appends the new node to p.children.
    """
    def __init__(self, name, attrs=None, text=None, parent=None):
        self.name = name          # element tag name
        self.attrs = attrs        # SAX attribute mapping (or None for root)
        self.text = text          # accumulated character data (or None)
        self.parent = parent      # enclosing Node, None for the root
        self.children = []        # child Nodes in document order
        if parent is not None:
            parent.children.append(self)
    def get_bool(self, key, default=False):
        """Return attribute/child ``key`` coerced via int() to bool,
        or ``default`` when it is absent."""
        v = self.get(key)
        if v is None:
            return default
        else:
            return bool(int(v))
    def index(self):
        """Position of this node among its parent's children (0 at root)."""
        if self.parent:
            return self.parent.children.index(self)
        else:
            return 0
    def has(self, key):
        """True if ``self[key]`` would succeed (see __getitem__)."""
        try:
            self[key]
            return True
        except KeyError:
            return False
        except IndexError:
            return False
    def get(self, key, default=None):
        """Like dict.get: ``self[key]``, or ``default`` when missing."""
        if self.has(key):
            return self[key]
        else:
            return default
    def __getitem__(self, key):
        """Polymorphic lookup dispatched on the key's type:
        - callable: filter children by the predicate (returns a list)
        - str (via __getstr__): "@name" reads an attribute, otherwise
          selects child elements by tag name
        - int (via __getint__): positional child access
        """
        if callable(key):
            return list(filter(key, self.children))
        else:
            # Dispatch to __get<typename>__ based on the key's class name.
            t = key.__class__
            meth = "__get%s__" % t.__name__
            if hasattr(self, meth):
                return getattr(self, meth)(key)
            else:
                raise KeyError(key)
    def __getstr__(self, name):
        # "@foo" reads attribute foo; anything else selects children
        # whose tag equals `name`.
        if name[:1] == "@":
            return self.attrs[name[1:]]
        else:
            return self[lambda nd: nd.name == name]
    def __getint__(self, index):
        return self.children[index]
    def __iter__(self):
        return iter(self.children)
    def path(self):
        """Slash-separated path of tag names from the root to this node."""
        if self.parent is None:
            return "/%s" % self.name
        else:
            return "%s/%s" % (self.parent.path(), self.name)
class Builder(ContentHandler):
    """SAX content handler that grows a Node tree while parsing.

    ``self.node`` always points at the node currently being built:
    start events descend into a new child, end events ascend.
    """
    def __init__(self, start=None):
        ContentHandler.__init__(self)
        self.node = start
        # NOTE(review): initialized as a list, yet __setitem__ below
        # indexes it with an arbitrary key — this looks like it was
        # meant to be a dict; confirm before relying on it.
        self.types = []
    def __setitem__(self, element, value):
        self.types[element] = value
    def startElement(self, name, attrs):
        # The new child becomes the current node (Node self-links to parent).
        self.node = Node(name, attrs, None, self.node)
    def endElement(self, name):
        self.node = self.node.parent
    def characters(self, content):
        # SAX may deliver text in several chunks; accumulate them all.
        if self.node.text is None:
            self.node.text = content
        else:
            self.node.text += content
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
XML utilities used by spec.py
"""
import xml.sax
from xml.sax.handler import ContentHandler
def parse(file):
doc = Node("root")
xml.sax.parse(file, Builder(doc))
return doc
def parse_string(string):
doc = Node("root")
xml.sax.parseString(string, Builder(doc))
return doc
class Node(object):
def __init__(self, name, attrs=None, text=None, parent=None):
self.name = name
self.attrs = attrs
self.text = text
self.parent = parent
self.children = []
if parent is not None:
parent.children.append(self)
def get_bool(self, key, default=False):
v = self.get(key)
if v is None:
return default
else:
return bool(int(v))
def index(self):
if self.parent:
return self.parent.children.index(self)
else:
return 0
def has(self, key):
try:
self[key]
return True
except KeyError:
return False
except IndexError:
return False
def get(self, key, default=None):
if self.has(key):
return self[key]
else:
return default
def __getitem__(self, key):
if callable(key):
return list(filter(key, self.children))
else:
t = key.__class__
meth = "__get%s__" % t.__name__
if hasattr(self, meth):
return getattr(self, meth)(key)
else:
raise KeyError(key)
def __getstr__(self, name):
if name[:1] == "@":
return self.attrs[name[1:]]
else:
return self[lambda nd: nd.name == name]
def __getint__(self, index):
return self.children[index]
def __iter__(self):
return iter(self.children)
def path(self):
if self.parent is None:
return "/%s" % self.name
else:
return "%s/%s" % (self.parent.path(), self.name)
class Builder(ContentHandler):
def __init__(self, start=None):
ContentHandler.__init__(self)
self.node = start
self.types = []
def __setitem__(self, element, value):
self.types[element] = value
def startElement(self, name, attrs):
self.node = Node(name, attrs, None, self.node)
def endElement(self, name):
self.node = self.node.parent
def characters(self, content):
if self.node.text is None:
self.node.text = content
else:
self.node.text += content
|
en
| 0.865487
|
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # XML utilities used by spec.py
| 2.020196
| 2
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_slice_mgr_proxy_cfg.py
|
bopopescu/ACI
| 0
|
6625603
|
<reponame>bopopescu/ACI
""" Cisco_IOS_XR_slice_mgr_proxy_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR slice\-mgr\-proxy package configuration.
This module contains definitions
for the following management objects\:
node\-path\: Node act path
Copyright (c) 2013\-2017 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
# NOTE(review): this is machine-generated YDK binding code for the
# Cisco-IOS-XR-slice-mgr-proxy-cfg YANG model; prefer regenerating from
# the model over hand-editing.
class NodePath(Entity):
    """
    Node act path
    .. attribute:: node
    Node (Physical location of the node in R\_S\_I format)
    **type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_slice_mgr_proxy_cfg.NodePath.Node>`
    """
    _prefix = 'slice-mgr-proxy-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        super(NodePath, self).__init__()
        self._top_entity = None
        self.yang_name = "node-path"
        self.yang_parent_name = "Cisco-IOS-XR-slice-mgr-proxy-cfg"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([("node", ("node", NodePath.Node))])
        self._leafs = OrderedDict()
        self.node = YList(self)
        self._segment_path = lambda: "Cisco-IOS-XR-slice-mgr-proxy-cfg:node-path"

    def __setattr__(self, name, value):
        self._perform_setattr(NodePath, [], name, value)

    class Node(Entity):
        """
        Node (Physical location of the node in R\_S\_I
        format)
        .. attribute:: node_name  (key)
        Location in R\_S\_I format
        **type**\: str
        .. attribute:: slice_ids
        Slice
        **type**\: :py:class:`SliceIds <ydk.models.cisco_ios_xr.Cisco_IOS_XR_slice_mgr_proxy_cfg.NodePath.Node.SliceIds>`
        """
        _prefix = 'slice-mgr-proxy-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            super(NodePath.Node, self).__init__()
            self.yang_name = "node"
            self.yang_parent_name = "node-path"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = ['node_name']
            self._child_container_classes = OrderedDict([("slice-ids", ("slice_ids", NodePath.Node.SliceIds))])
            self._child_list_classes = OrderedDict([])
            self._leafs = OrderedDict([
                ('node_name', YLeaf(YType.str, 'node-name')),
            ])
            self.node_name = None
            self.slice_ids = NodePath.Node.SliceIds()
            self.slice_ids.parent = self
            self._children_name_map["slice_ids"] = "slice-ids"
            self._children_yang_names.add("slice-ids")
            self._segment_path = lambda: "node" + "[node-name='" + str(self.node_name) + "']"
            self._absolute_path = lambda: "Cisco-IOS-XR-slice-mgr-proxy-cfg:node-path/%s" % self._segment_path()

        def __setattr__(self, name, value):
            self._perform_setattr(NodePath.Node, ['node_name'], name, value)

        class SliceIds(Entity):
            """
            Slice
            .. attribute:: slice_id
            Slice Id on which configuration will be applied
            **type**\: list of :py:class:`SliceId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_slice_mgr_proxy_cfg.NodePath.Node.SliceIds.SliceId>`
            """
            _prefix = 'slice-mgr-proxy-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                super(NodePath.Node.SliceIds, self).__init__()
                self.yang_name = "slice-ids"
                self.yang_parent_name = "node"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                self._child_container_classes = OrderedDict([])
                self._child_list_classes = OrderedDict([("slice-id", ("slice_id", NodePath.Node.SliceIds.SliceId))])
                self._leafs = OrderedDict()
                self.slice_id = YList(self)
                self._segment_path = lambda: "slice-ids"

            def __setattr__(self, name, value):
                self._perform_setattr(NodePath.Node.SliceIds, [], name, value)

            class SliceId(Entity):
                """
                Slice Id on which configuration will be
                applied
                .. attribute:: slice_id  (key)
                The identifier for this slice
                **type**\: int
                **range:** 0..4
                .. attribute:: state
                set val 0 to shutdown
                **type**\: int
                **range:** \-2147483648..2147483647
                .. attribute:: breakout
                10G Breakout Config
                **type**\: int
                **range:** \-2147483648..2147483647
                .. attribute:: mode
                set val 4 for OTU4
                **type**\: int
                **range:** \-2147483648..2147483647
                """
                _prefix = 'slice-mgr-proxy-cfg'
                _revision = '2015-11-09'

                def __init__(self):
                    super(NodePath.Node.SliceIds.SliceId, self).__init__()
                    self.yang_name = "slice-id"
                    self.yang_parent_name = "slice-ids"
                    self.is_top_level_class = False
                    self.has_list_ancestor = True
                    self.ylist_key_names = ['slice_id']
                    self._child_container_classes = OrderedDict([])
                    self._child_list_classes = OrderedDict([])
                    self._leafs = OrderedDict([
                        ('slice_id', YLeaf(YType.uint32, 'slice-id')),
                        ('state', YLeaf(YType.int32, 'state')),
                        ('breakout', YLeaf(YType.int32, 'breakout')),
                        ('mode', YLeaf(YType.int32, 'mode')),
                    ])
                    self.slice_id = None
                    self.state = None
                    self.breakout = None
                    self.mode = None
                    self._segment_path = lambda: "slice-id" + "[slice-id='" + str(self.slice_id) + "']"

                def __setattr__(self, name, value):
                    self._perform_setattr(NodePath.Node.SliceIds.SliceId, ['slice_id', 'state', 'breakout', 'mode'], name, value)

    def clone_ptr(self):
        # YDK hook: hand a fresh top-level entity to the runtime.
        self._top_entity = NodePath()
        return self._top_entity
|
""" Cisco_IOS_XR_slice_mgr_proxy_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR slice\-mgr\-proxy package configuration.
This module contains definitions
for the following management objects\:
node\-path\: Node act path
Copyright (c) 2013\-2017 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class NodePath(Entity):
"""
Node act path
.. attribute:: node
Node (Physical location of the node in R\_S\_I format)
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_slice_mgr_proxy_cfg.NodePath.Node>`
"""
_prefix = 'slice-mgr-proxy-cfg'
_revision = '2015-11-09'
def __init__(self):
super(NodePath, self).__init__()
self._top_entity = None
self.yang_name = "node-path"
self.yang_parent_name = "Cisco-IOS-XR-slice-mgr-proxy-cfg"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("node", ("node", NodePath.Node))])
self._leafs = OrderedDict()
self.node = YList(self)
self._segment_path = lambda: "Cisco-IOS-XR-slice-mgr-proxy-cfg:node-path"
def __setattr__(self, name, value):
self._perform_setattr(NodePath, [], name, value)
class Node(Entity):
"""
Node (Physical location of the node in R\_S\_I
format)
.. attribute:: node_name (key)
Location in R\_S\_I format
**type**\: str
.. attribute:: slice_ids
Slice
**type**\: :py:class:`SliceIds <ydk.models.cisco_ios_xr.Cisco_IOS_XR_slice_mgr_proxy_cfg.NodePath.Node.SliceIds>`
"""
_prefix = 'slice-mgr-proxy-cfg'
_revision = '2015-11-09'
def __init__(self):
super(NodePath.Node, self).__init__()
self.yang_name = "node"
self.yang_parent_name = "node-path"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['node_name']
self._child_container_classes = OrderedDict([("slice-ids", ("slice_ids", NodePath.Node.SliceIds))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('node_name', YLeaf(YType.str, 'node-name')),
])
self.node_name = None
self.slice_ids = NodePath.Node.SliceIds()
self.slice_ids.parent = self
self._children_name_map["slice_ids"] = "slice-ids"
self._children_yang_names.add("slice-ids")
self._segment_path = lambda: "node" + "[node-name='" + str(self.node_name) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-slice-mgr-proxy-cfg:node-path/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(NodePath.Node, ['node_name'], name, value)
class SliceIds(Entity):
"""
Slice
.. attribute:: slice_id
Slice Id on which configuration will be applied
**type**\: list of :py:class:`SliceId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_slice_mgr_proxy_cfg.NodePath.Node.SliceIds.SliceId>`
"""
_prefix = 'slice-mgr-proxy-cfg'
_revision = '2015-11-09'
def __init__(self):
super(NodePath.Node.SliceIds, self).__init__()
self.yang_name = "slice-ids"
self.yang_parent_name = "node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("slice-id", ("slice_id", NodePath.Node.SliceIds.SliceId))])
self._leafs = OrderedDict()
self.slice_id = YList(self)
self._segment_path = lambda: "slice-ids"
def __setattr__(self, name, value):
self._perform_setattr(NodePath.Node.SliceIds, [], name, value)
class SliceId(Entity):
"""
Slice Id on which configuration will be
applied
.. attribute:: slice_id (key)
The identifier for this slice
**type**\: int
**range:** 0..4
.. attribute:: state
set val 0 to shutdown
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: breakout
10G Breakout Config
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: mode
set val 4 for OTU4
**type**\: int
**range:** \-2147483648..2147483647
"""
_prefix = 'slice-mgr-proxy-cfg'
_revision = '2015-11-09'
def __init__(self):
super(NodePath.Node.SliceIds.SliceId, self).__init__()
self.yang_name = "slice-id"
self.yang_parent_name = "slice-ids"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['slice_id']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('slice_id', YLeaf(YType.uint32, 'slice-id')),
('state', YLeaf(YType.int32, 'state')),
('breakout', YLeaf(YType.int32, 'breakout')),
('mode', YLeaf(YType.int32, 'mode')),
])
self.slice_id = None
self.state = None
self.breakout = None
self.mode = None
self._segment_path = lambda: "slice-id" + "[slice-id='" + str(self.slice_id) + "']"
def __setattr__(self, name, value):
self._perform_setattr(NodePath.Node.SliceIds.SliceId, ['slice_id', 'state', 'breakout', 'mode'], name, value)
def clone_ptr(self):
self._top_entity = NodePath()
return self._top_entity
|
en
| 0.369629
|
Cisco_IOS_XR_slice_mgr_proxy_cfg This module contains a collection of YANG definitions for Cisco IOS\-XR slice\-mgr\-proxy package configuration. This module contains definitions for the following management objects\: node\-path\: Node act path Copyright (c) 2013\-2017 by Cisco Systems, Inc. All rights reserved. Node act path .. attribute:: node Node (Physical location of the node in R\_S\_I format) **type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_slice_mgr_proxy_cfg.NodePath.Node>` Node (Physical location of the node in R\_S\_I format) .. attribute:: node_name (key) Location in R\_S\_I format **type**\: str .. attribute:: slice_ids Slice **type**\: :py:class:`SliceIds <ydk.models.cisco_ios_xr.Cisco_IOS_XR_slice_mgr_proxy_cfg.NodePath.Node.SliceIds>` Slice .. attribute:: slice_id Slice Id on which configuration will be applied **type**\: list of :py:class:`SliceId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_slice_mgr_proxy_cfg.NodePath.Node.SliceIds.SliceId>` Slice Id on which configuration will be applied .. attribute:: slice_id (key) The identifier for this slice **type**\: int **range:** 0..4 .. attribute:: state set val 0 to shutdown **type**\: int **range:** \-2147483648..2147483647 .. attribute:: breakout 10G Breakout Config **type**\: int **range:** \-2147483648..2147483647 .. attribute:: mode set val 4 for OTU4 **type**\: int **range:** \-2147483648..2147483647
| 1.95117
| 2
|
LeetCode/0891_sum_subsequence_widths/subsequence_widths.py
|
NathanielBlairStahn/python-practice
| 0
|
6625604
|
<reponame>NathanielBlairStahn/python-practice
"""
From Leetcode (8/24/2018):
891. Sum of Subsequence Widths
Given an array of integers A, consider all non-empty subsequences of A.
For any sequence S, let the width of S be the difference between the maximum
and minimum element of S.
Return the sum of the widths of all subsequences of A.
As the answer may be very large, return the answer modulo 10^9 + 7.
Example 1:
Input: [2,1,3]
Output: 6
Explanation:
Subsequences are [1], [2], [3], [2,1], [2,3], [1,3], [2,1,3].
The corresponding widths are 0, 0, 0, 1, 1, 2, 2.
The sum of these widths is 6.
Note:
1 <= A.length <= 20000
1 <= A[i] <= 20000
"""
class Solution:
    def sumSubseqWidths(self, a):
        """
        :type a: List[int]
        :rtype: int

        Return the sum of (max - min) over all non-empty subsequences
        of ``a``, modulo 10**9 + 7.

        Runs in O(n log n): sorting dominates; the accumulation below is
        a single O(n) pass.

        Key facts (for the sorted array ``a``):
        * Exactly 2**(k-j-1) subsequences have a[j] as minimum and a[k]
          as maximum (one per subset of the elements strictly between
          indices j and k), each contributing (a[k]-a[j]) * 2**(k-j-1).
        * The total width over pairs with gap k-j == i equals the total
          over gap n-i, and both are telescoping sums that grow by
          a[n-i] - a[i-1] per step — so one running ``sum_diffs``
          accumulates everything.
        (The two commented-out O(n^2) predecessors of this function have
        been removed.)
        """
        a = sorted(a)
        n = len(a)
        total = 0
        sum_diffs = 0
        modulus = 10**9 + 7
        for i in range(1, (n + 1) // 2):
            sum_diffs += a[n - i] - a[i - 1]
            # Bug fix: '<<' binds looser than '+', so the original
            # expression '1<<(i-1) + 1<<(n-i-1)' parsed as
            # 1 << ((i-1) + (1 << (n-i-1))) — a single enormous shift —
            # instead of the intended sum of two powers of two.
            total += sum_diffs * (((1 << (i - 1)) + (1 << (n - i - 1))) % modulus)
            if total >= modulus:
                total %= modulus
        # When n is even the middle gap i == n//2 pairs with itself
        # (n - i == i), so count it once rather than twice.
        if n % 2 == 0:
            i = n // 2
            sum_diffs += a[i] - a[i - 1]
            total += sum_diffs * ((1 << (i - 1)) % modulus)
            if total >= modulus:
                total %= modulus
        return total
def counting_sort_integers(values, max_val=None, min_val=None, inplace=False):
    """
    Sort a list of integers with counting sort.

    Parameters
    ----------
    values: list of ints to sort
    max_val: largest possible value (computed from `values` if None)
    min_val: smallest possible value (computed from `values` if None)
    inplace: overwrite `values` instead of allocating a new list

    With n = len(values) and k = max_val - min_val + 1, this runs in
    O(n + k) time after the optional O(n) min/max scan.
    """
    if not values:
        return values if inplace else []
    # Determine the value range when the caller did not supply it.
    if max_val is None:
        max_val = max(values)
    if min_val is None:
        min_val = min(values)
    # Histogram of occurrences, indexed by value - min_val.
    counts = [0] * (max_val - min_val + 1)
    for v in values:
        counts[v - min_val] += 1
    # Reuse the input list when sorting in place, otherwise allocate.
    result = values if inplace else [0] * len(values)
    # Expand the histogram back into sorted order: each slot `offset`
    # emits its value `count` times.  O(n + k) total.
    pos = 0
    for offset, count in enumerate(counts):
        v = min_val + offset
        for _ in range(count):
            result[pos] = v
            pos += 1
    return result
def counting_sort(items, key=None, max_key=None, min_key=None):
    """
    Stable counting sort of `items` by their integer keys.

    This is a modified version of the code described on Wikipedia:
    https://en.wikipedia.org/wiki/Counting_sort

    Parameters
    ----------
    items: list of items
    key: function mapping each item to an integer key (identity if None)
    max_key: the maximum possible key (computed from `items` if None)
    min_key: the minimum possible key (computed from `items` if None)

    With n = len(items) and k = max_key - min_key + 1, runs in O(n + k).
    """
    # BUG FIX: the original raised IndexError on an empty input because it
    # unconditionally read key(items[0]) when max_key was None.
    if not items:
        return []
    # If no key function is passed, the items themselves are the keys.
    if key is None:
        key = lambda x: x
    # Find the key range when the caller did not supply it (O(n) each).
    if max_key is None:
        # If min_key is also unknown, find both in a single pass.
        if min_key is None:
            max_key = min_key = key(items[0])
            for item in items[1:]:
                next_key = key(item)
                if next_key < min_key:
                    min_key = next_key
                elif next_key > max_key:
                    max_key = next_key
        else:
            max_key = max(key(item) for item in items)
    elif min_key is None:
        min_key = min(key(item) for item in items)
    # counts[k] = number of items whose shifted key is k.  O(k) + O(n).
    counts = [0] * (max_key - min_key + 1)
    # Shift keys so the smallest key indexes slot 0.
    shifted_key = lambda x: key(x) - min_key
    for item in items:
        counts[shifted_key(item)] += 1
    # Convert counts to start indices in place: after this loop,
    # index_of[k] is where the first item with shifted key k belongs;
    # it is incremented each time an item with that key is placed.
    index_of = counts
    index = 0
    for k, count in enumerate(counts):
        index_of[k] = index
        index += count
    # Place items into a fresh output list; scanning `items` left to right
    # keeps equal-keyed items in their original order (stable sort).
    output = [None] * len(items)
    for item in items:
        output[index_of[shifted_key(item)]] = item
        index_of[shifted_key(item)] += 1
    return output
|
"""
From Leetcode (8/24/2018):
891. Sum of Subsequence Widths
Given an array of integers A, consider all non-empty subsequences of A.
For any sequence S, let the width of S be the difference between the maximum
and minimum element of S.
Return the sum of the widths of all subsequences of A.
As the answer may be very large, return the answer modulo 10^9 + 7.
Example 1:
Input: [2,1,3]
Output: 6
Explanation:
Subsequences are [1], [2], [3], [2,1], [2,3], [1,3], [2,1,3].
The corresponding widths are 0, 0, 0, 1, 1, 2, 2.
The sum of these widths is 6.
Note:
1 <= A.length <= 20000
1 <= A[i] <= 20000
"""
class Solution:
    """Leetcode 891, "Sum of Subsequence Widths".

    Given an array of integers, return the sum of (max - min) over all
    non-empty subsequences, modulo 10**9 + 7.

    NOTE: the O(n**2) drafts previously kept here as commented-out code
    have been removed; recover them from version control if needed.
    """

    def sumSubseqWidths(self, a):
        """
        Return the sum of widths over all non-empty subsequences of `a`,
        modulo 10**9 + 7.

        :type a: List[int]
        :rtype: int

        Runs in O(n*log(n)) overall: O(n*log(n)) to sort plus a single
        O(n) pass to accumulate the answer.
        """
        # The answer is invariant under reordering, so sort first.  In the
        # sorted array there are exactly 2**(k-j-1) subsequences whose
        # minimum is a[j] and maximum is a[k] (one per subset of the
        # elements strictly between j and k), each contributing
        # (a[k] - a[j]) * 2**(k-j-1) to the total.
        a = sorted(a)
        n = len(a)
        total = 0
        sum_diffs = 0
        modulus = 10**9 + 7
        # For i > 0 the sum of widths with k-j == i equals the sum with
        # k-j == n-i, and each is exactly a[n-i] - a[i-1] larger than the
        # sum for k-j == i-1 (the per-gap sums telescope), so one pass over
        # half the sorted array suffices.
        #
        # BUG FIX: the previous code computed the weight as
        #     1<<(i-1) + 1<<(n-i-1)
        # but `+` binds tighter than `<<`, so this evaluated as
        #     1 << (i-1+1) << (n-i-1)  ==  2**(n-1)
        # instead of 2**(i-1) + 2**(n-i-1).  Use three-argument pow(),
        # which is also modular and keeps intermediates small.
        for i in range(1, (n + 1) // 2):
            sum_diffs += a[n - i] - a[i - 1]
            total += sum_diffs * ((pow(2, i - 1, modulus) + pow(2, n - i - 1, modulus)) % modulus)
            total %= modulus
        # If n is even, the gap k-j == n/2 pairs with itself (i == n-i),
        # so handle it exactly once, separately.
        if n % 2 == 0:
            i = n // 2
            sum_diffs += a[i] - a[i - 1]
            total += sum_diffs * pow(2, i - 1, modulus)
            total %= modulus
        return total
def counting_sort_integers(values, max_val=None, min_val=None, inplace=False):
    """
    Sort a list of integers with counting sort.

    Parameters
    ----------
    values: list of ints to sort
    max_val: largest possible value (computed from `values` if None)
    min_val: smallest possible value (computed from `values` if None)
    inplace: overwrite `values` instead of allocating a new list

    With n = len(values) and k = max_val - min_val + 1, this runs in
    O(n + k) time after the optional O(n) min/max scan.
    """
    if not values:
        return values if inplace else []
    # Determine the value range when the caller did not supply it.
    if max_val is None:
        max_val = max(values)
    if min_val is None:
        min_val = min(values)
    # Histogram of occurrences, indexed by value - min_val.
    counts = [0] * (max_val - min_val + 1)
    for v in values:
        counts[v - min_val] += 1
    # Reuse the input list when sorting in place, otherwise allocate.
    result = values if inplace else [0] * len(values)
    # Expand the histogram back into sorted order: each slot `offset`
    # emits its value `count` times.  O(n + k) total.
    pos = 0
    for offset, count in enumerate(counts):
        v = min_val + offset
        for _ in range(count):
            result[pos] = v
            pos += 1
    return result
def counting_sort(items, key=None, max_key=None, min_key=None):
    """
    Stable counting sort of `items` by their integer keys.

    This is a modified version of the code described on Wikipedia:
    https://en.wikipedia.org/wiki/Counting_sort

    Parameters
    ----------
    items: list of items
    key: function mapping each item to an integer key (identity if None)
    max_key: the maximum possible key (computed from `items` if None)
    min_key: the minimum possible key (computed from `items` if None)

    With n = len(items) and k = max_key - min_key + 1, runs in O(n + k).
    """
    # BUG FIX: the original raised IndexError on an empty input because it
    # unconditionally read key(items[0]) when max_key was None.
    if not items:
        return []
    # If no key function is passed, the items themselves are the keys.
    if key is None:
        key = lambda x: x
    # Find the key range when the caller did not supply it (O(n) each).
    if max_key is None:
        # If min_key is also unknown, find both in a single pass.
        if min_key is None:
            max_key = min_key = key(items[0])
            for item in items[1:]:
                next_key = key(item)
                if next_key < min_key:
                    min_key = next_key
                elif next_key > max_key:
                    max_key = next_key
        else:
            max_key = max(key(item) for item in items)
    elif min_key is None:
        min_key = min(key(item) for item in items)
    # counts[k] = number of items whose shifted key is k.  O(k) + O(n).
    counts = [0] * (max_key - min_key + 1)
    # Shift keys so the smallest key indexes slot 0.
    shifted_key = lambda x: key(x) - min_key
    for item in items:
        counts[shifted_key(item)] += 1
    # Convert counts to start indices in place: after this loop,
    # index_of[k] is where the first item with shifted key k belongs;
    # it is incremented each time an item with that key is placed.
    index_of = counts
    index = 0
    for k, count in enumerate(counts):
        index_of[k] = index
        index += count
    # Place items into a fresh output list; scanning `items` left to right
    # keeps equal-keyed items in their original order (stable sort).
    output = [None] * len(items)
    for item in items:
        output[index_of[shifted_key(item)]] = item
        index_of[shifted_key(item)] += 1
    return output
|
en
| 0.840068
|
From Leetcode (8/24/2018): 891. Sum of Subsequence Widths Given an array of integers A, consider all non-empty subsequences of A. For any sequence S, let the width of S be the difference between the maximum and minimum element of S. Return the sum of the widths of all subsequences of A. As the answer may be very large, return the answer modulo 10^9 + 7. Example 1: Input: [2,1,3] Output: 6 Explanation: Subsequences are [1], [2], [3], [2,1], [2,3], [1,3], [2,1,3]. The corresponding widths are 0, 0, 0, 1, 1, 2, 2. The sum of these widths is 6. Note: 1 <= A.length <= 20000 1 <= A[i] <= 20000 # def sumSubseqWidths(self, a): # """ # :type A: List[int] # :rtype: int # This solution runs in quadratic time, rather than the naive exponential time solution. # """ # #The answer is the same if we rearrange the elements of a. # #Sorting the elements allows us to count the subsequences # #that have a particular pair of elements as its minimum # #and maximum - see comments below. # a = sorted(a) # n = len(a) # total = 0 # modulus = 10**9 + 7 # #Rather than the naive solution that iterates through all 2**n - 1 subsequences, # #we can compute the sum of the widths in quadratic time by grouping subsequences # #according to the minimum and maximum values. # for i in range(n): # for j in range(i+1, n): # #There are exactly 2**(j-i-1) subsequences where # #a[i] is the minimum and a[j] is the maximum (one for # #each subset of the elements between indices i and j), # #so the sum of the widths for all these subsequences # #is (a[j]-a[i]) * 2**(j-i-1). # total += (a[j]-a[i]) * (2**(j-i-1) % modulus) # total %= modulus # return total # def sumSubseqWidths(self, a): # """ # :type a: List[int] # :rtype: int # This solution runs in quadratic time, but 4 times faster than the one above. # """ # #The answer is the same if we rearrange the elements of a. 
# #Sorting the elements allows us to count the subsequences # #that have a particular pair of elements as its minimum # #and maximum - see comments below. # a = sorted(a) # n = len(a) # total = 0 # modulus = 10**9 + 7 # #Rather than the naive solution that iterates through all 2**n - 1 subsequences, # #we can compute the sum of the widths in quadratic time by grouping subsequences # #according to the minimum and maximum values. # #There are exactly 2**(k-j-1) subsequences where # #a[j] is the minimum and a[k] is the maximum (one for # #each subset of the elements between indices j and k), # #so the sum of the widths for all these subsequences # #is (a[k]-a[j]) * 2**(k-j-1). # #Moreover, the sum of the widths for which k-j = i is the same as the sum of the # #widths for which k-j = n-i (this is not obvious until you compute some examples), # #so by grouping these terms together, we can cut the work by a factor of 4 ((n/2)^2 vs. n^2). # for i in range(1,(n+1)//2): # sum_diffs = 0 # for j in range(i): # sum_diffs += a[n-j-1] - a[j] # total += sum_diffs * ((2**(i-1) + 2**(n-i-1)) % modulus) # total %= modulus # #If n is even, the widths for k-j=n/2 are not paired, so compute this case separately # if n % 2 == 0: # sum_diffs = 0 # for j in range(n//2): # sum_diffs += a[n-j-1] - a[j] # total += sum_diffs * (2**(n//2-1) % modulus) # total %= modulus # return total :type a: List[int] :rtype: int This solution runs in O(n*log(n)) time overall: The total time is O(n*log(n)) for sorting the array, plus O(n) time for computing the sum of widths from the sorted array. #The answer is the same if we rearrange the elements of a. #Sorting the elements allows us to count the subsequences #that have a particular pair of elements as its minimum #and maximum - see comments below. #This was too slow to pass all the test cases. Try counting sort. 
#a = counting_sort_integers(a) #Rather than the naive solution that iterates through all 2**n - 1 subsequences, #we can compute the sum of the widths in quadratic time by grouping subsequences #according to the minimum and maximum values. #There are exactly 2**(k-j-1) subsequences where #a[j] is the minimum and a[k] is the maximum (one for #each subset of the elements between indices j and k), #so the sum of the widths for all these subsequences #is (a[k]-a[j]) * 2**(k-j-1). #Moreover, for i>0, the sum of the widths for which k-j = i is the same as the sum of the #widths for which k-j = n-i, and this common sum is precisely a[n-i] - a[i-1] #greater than the sum of the widths for which k-j = i-1. This occurs because the sum of #widths for which k-j is fixed is a telescoping sum (to see this, compute some examples). #This observation allows us to iterate exactly once through the sorted array in order to compute #the total sum of the widths, so the remainder of this function runs in linear time. Thus, #the overall runtime will be O(n log(n)) from the sorting above. #If n is even, the widths for k-j=n/2 are not paired (i.e. when i = n/2, then n-i = n/2 as well), #so compute this case separately. Sorts an array of integers using counting_sort. Let n = len(values), k = max_val+1 #Runs in O(n) time if max_val is None or min_val is None #If both are None, find max and min simultaneously. #Assume values are integers in the range 0,1,2,...,max_val #Runs in O(k) time #Runs in O(n) time #Overwrite values if inplace==True, otherwise create a new array for output. #Requires O(n) time if inplace is False. #Simultaneously iterate through output and counts arrays. #value will be the index of counts array - this is the value #we will be storing in the output array. #Iterate through output array, storing one value at a time. #The for loop has n iterations. #The inner while loop will have a total of k iterations. 
#So the runtime for this loop is O(n+k) #Find the next value with a nonzero count. #Store the value in the output array and decrease its count. #Total runtime, in iterations, is 2k+2n if max_key is passed and inplace==True. #Another n is added if max_key is None or inplace is False, for a maximum #runtime of 2k+4n. Sorts an array of items by their integer keys, using counting_sort. Implemented as a stable sort. This is a modified version of the code described on Wikipedia: https://en.wikipedia.org/wiki/Counting_sort Parameters ---------- items: list of items key: function mapping each item to an integer key max_key: the maximum possible key min_key: the minimum possible key #If no key functions is passed, assume items #are integers in the range 0,1,2,...,max_key #If the maximum key wasn't specified, find it. #If min_key is also none, find both simultaneously. #(Takes time n if max_key was not specified) #If None was passed for the minimum key, find the minimum. #(Takes time n if min_key was set to None) #Initialize an array to count the occurrances of keys. #The index is the key, and counts[key] is the count of that key. #(Takes time K) #In case the minimum key is not 0, redefine the key function to return #values from 0 to max_key-min_key, in order to index into the #counts array. #Iterate through items, to count how many times each key occurs #(Takes time n) #Rename the counts array because we will be overwriting it to store indices #of keys instead of counts of keys. #Create the index_of array as the cumulative sum of the counts array. #When the loop finishes, we will have #index_of[k] = counts[0] + counts[1] + ... + counts[k-1], #but we can do it in place, replacing count[k] with index_of[k]. # #The value index_of[k] is the start index of the items with key(item) = k. #In the final loop, we will increment index_of[k] each time we place an #item with key(item) = k, so that the next time k is encountered, we'll #have the new correct index for that key. 
#Store the current index (cumulative sum of counts) #(Takes time K) #k is the shifted key, count is its count. #Note that index_of = counts #Create a new array for output. We can't modify the input in place #if we want a stable sort. #(Takes time n) #Iterate through items, putting each item in the correct place in output. #The index for the first item with each key k is stored in index_of[k] #(Takes time n) #Put the item in the correct index for its key. #Increment the index for the next time we encounter the key. #Total runtime in iterations is 2k + 3n, plus another n if max_key or min_key #is not specified.
| 3.765021
| 4
|
src/pentest/hooks/airodump.py
|
BastienFaure/jarvis
| 14
|
6625605
|
from pentest.command import JarvisCmd
import sys
__name__ = "airodump-ng"
def main():
    """Entry point for the airodump-ng hook: hand the CLI args to Jarvis."""
    # NOTE(review): -w and -H are listed as passthrough args, and -w also
    # appears to name the output file (format unspecified) — presumably
    # JarvisCmd interprets this; confirm against the JarvisCmd API.
    output_spec = {
        "format": None,
        "arg": "-w"
    }
    args_config = {
        "passthrough_args": ["-w", "-H"],
        "output_args": [output_spec]
    }
    command = JarvisCmd(sys.argv)
    command.set_args_config(args_config)
    command.run()
|
from pentest.command import JarvisCmd
import sys
__name__ = "airodump-ng"
def main():
    """Entry point for the airodump-ng hook: hand the CLI args to Jarvis."""
    # NOTE(review): -w and -H are listed as passthrough args, and -w also
    # appears to name the output file (format unspecified) — presumably
    # JarvisCmd interprets this; confirm against the JarvisCmd API.
    output_spec = {
        "format": None,
        "arg": "-w"
    }
    args_config = {
        "passthrough_args": ["-w", "-H"],
        "output_args": [output_spec]
    }
    command = JarvisCmd(sys.argv)
    command.set_args_config(args_config)
    command.run()
|
none
| 1
| 1.857779
| 2
|
|
test/test_converter.py
|
Apkawa/python-video-converter
| 0
|
6625606
|
<filename>test/test_converter.py
#!/usr/bin/env python
# modify the path so that parent directory is in it
import sys
sys.path.append('../')
import random
import string
import shutil
import unittest
import os
from os.path import join as pjoin
from converter import ffmpeg, formats, avcodecs, Converter, ConverterError
def verify_progress(p):
    """Return True iff `p` is a non-empty iterable of ints in 0..100 that
    never decreases (i.e. a plausible progress sequence)."""
    if not p:
        return False
    values = list(p)
    if not values:
        return False
    previous = 0
    for value in values:
        # `type(...) is int` deliberately rejects bools and other int
        # subclasses, matching the original exact-type check.
        if type(value) is not int:
            return False
        if not 0 <= value <= 100:
            return False
        if value < previous:
            return False
        previous = value
    return True
class TestFFMpeg(unittest.TestCase):
def setUp(self):
current_dir = os.path.abspath(os.path.dirname(__file__))
temp_name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(20))
self.temp_dir = pjoin(current_dir, temp_name)
if not os.path.exists(self.temp_dir):
os.makedirs(self.temp_dir)
self.video_file_path = pjoin(self.temp_dir, 'output.ogg')
self.audio_file_path = pjoin(self.temp_dir, 'output.mp3')
self.shot_file_path = pjoin(self.temp_dir, 'shot.png')
self.shot2_file_path = pjoin(self.temp_dir, 'shot2.png')
self.shot3_file_path = pjoin(self.temp_dir, 'shot3.png')
def tearDown(self):
shutil.rmtree(self.temp_dir)
def assertRaisesSpecific(self, exception, fn, *args, **kwargs):
try:
fn(*args, **kwargs)
raise Exception('Expected exception %s not raised' % repr(exception))
except exception:
ex = sys.exc_info()[1]
return ex
@staticmethod
def ensure_notexist(f):
if os.path.exists(f):
os.unlink(f)
def test_ffmpeg_probe(self):
self.assertRaisesSpecific(ffmpeg.FFMpegError, ffmpeg.FFMpeg,
ffmpeg_path='/foo', ffprobe_path='/bar')
f = ffmpeg.FFMpeg()
self.assertEqual(None, f.probe('nonexistent'))
self.assertEqual(None, f.probe('/dev/null'))
info = f.probe('test1.ogg')
self.assertEqual('ogg', info.format.format)
self.assertAlmostEqual(33.00, info.format.duration, places=2)
self.assertEqual(2, len(info.streams))
v = info.streams[0]
self.assertEqual(v, info.video)
self.assertEqual('video', v.type)
self.assertEqual('theora', v.codec)
self.assertEqual(720, v.video_width)
self.assertEqual(400, v.video_height)
self.assertEqual(None, v.bitrate)
self.assertAlmostEqual(25.00, v.video_fps, places=2)
self.assertEqual(v.metadata['ENCODER'], 'ffmpeg2theora 0.19')
a = info.streams[1]
self.assertEqual(a, info.audio)
self.assertEqual('audio', a.type)
self.assertEqual('vorbis', a.codec)
self.assertEqual(2, a.audio_channels)
self.assertEqual(80000, a.bitrate)
self.assertEqual(48000, a.audio_samplerate)
self.assertEqual(a.metadata['ENCODER'], 'ffmpeg2theora 0.19')
self.assertEqual(repr(info), 'MediaInfo(format='
'MediaFormatInfo(format=ogg, duration=33.00), streams=['
'MediaStreamInfo(type=video, codec=theora, width=720, '
'height=400, fps=25.0, ENCODER=ffmpeg2theora 0.19), '
'MediaStreamInfo(type=audio, codec=vorbis, channels=2, rate=48000, '
'bitrate=80000, ENCODER=ffmpeg2theora 0.19)])')
def test_ffmpeg_convert(self):
f = ffmpeg.FFMpeg()
def consume(fn, *args, **kwargs):
return list(fn(*args, **kwargs))
self.assertRaisesSpecific(ffmpeg.FFMpegError, consume,
f.convert, 'nonexistent', self.video_file_path, [])
self.assertRaisesSpecific(ffmpeg.FFMpegConvertError, consume,
f.convert, '/etc/passwd', self.video_file_path, [])
info = f.probe('test1.ogg')
convert_options = [
'-acodec', 'libvorbis', '-ab', '16k', '-ac', '1', '-ar', '11025',
'-vcodec', 'libtheora', '-r', '15', '-s', '360x200', '-b', '128k']
conv = f.convert('test1.ogg', self.video_file_path, convert_options)
last_tc = 0.0
for tc in conv:
assert (last_tc < tc <= info.format.duration + 0.1), (last_tc, tc, info.format.duration)
self._assert_converted_video_file()
def _assert_converted_video_file(self):
"""
Asserts converted test1.ogg (in path self.video_file_path) is converted correctly
"""
f = ffmpeg.FFMpeg()
info = f.probe(self.video_file_path)
self.assertEqual('ogg', info.format.format)
self.assertAlmostEqual(33.00, info.format.duration, places=0)
self.assertEqual(2, len(info.streams))
self.assertEqual('video', info.video.type)
self.assertEqual('theora', info.video.codec)
self.assertEqual(360, info.video.video_width)
self.assertEqual(200, info.video.video_height)
self.assertAlmostEqual(15.00, info.video.video_fps, places=2)
self.assertEqual('audio', info.audio.type)
self.assertEqual('vorbis', info.audio.codec)
self.assertEqual(1, info.audio.audio_channels)
self.assertEqual(11025, info.audio.audio_samplerate)
def test_ffmpeg_termination(self):
# test when ffmpeg is killed
f = ffmpeg.FFMpeg()
convert_options = [
'-acodec', 'libvorbis', '-ab', '16k', '-ac', '1', '-ar', '11025',
'-vcodec', 'libtheora', '-r', '15', '-s', '360x200', '-b', '128k']
p_list = {} # modifiable object in closure
f._spawn = lambda *args: p_list.setdefault('', ffmpeg.FFMpeg._spawn(*args))
conv = f.convert('test1.ogg', self.video_file_path, convert_options)
next(conv) # let ffmpeg to start
p = p_list['']
p.terminate()
self.assertRaisesSpecific(ffmpeg.FFMpegConvertError, list, conv)
def test_converter(self):
c = Converter()
self.assertRaisesSpecific(ConverterError, c.parse_options, None)
self.assertRaisesSpecific(ConverterError, c.parse_options, {})
self.assertRaisesSpecific(ConverterError, c.parse_options, {'format': 'foo'})
self.assertRaisesSpecific(ConverterError, c.parse_options, {'format': 'ogg'})
self.assertRaisesSpecific(ConverterError, c.parse_options, {'format': 'ogg', 'video': 'whatever'})
self.assertRaisesSpecific(ConverterError, c.parse_options, {'format': 'ogg', 'audio': {}})
self.assertRaisesSpecific(ConverterError, c.parse_options,
{'format': 'ogg', 'audio': {'codec': 'bogus'}})
self.assertEqual(['-an', '-vcodec', 'libtheora', '-r', '25', '-sn', '-f', 'ogg'],
c.parse_options({'format': 'ogg', 'video': {'codec': 'theora', 'fps': 25}}))
self.assertEqual(['-acodec', 'copy', '-vcodec', 'copy', '-sn', '-f', 'ogg'],
c.parse_options(
{'format': 'ogg', 'audio': {'codec': 'copy'}, 'video': {'codec': 'copy'}, 'subtitle': {'codec': None}}))
info = c.probe('test1.ogg')
self.assertEqual('theora', info.video.codec)
self.assertEqual(720, info.video.video_width)
self.assertEqual(400, info.video.video_height)
f = self.shot_file_path
self.ensure_notexist(f)
c.thumbnail('test1.ogg', 10, f)
self.assertTrue(os.path.exists(f))
os.unlink(f)
conv = c.convert('test1.ogg', self.video_file_path, {
'format': 'ogg',
'video': {
'codec': 'theora', 'width': 160, 'height': 120, 'fps': 15, 'bitrate': 300},
'audio': {
'codec': 'vorbis', 'channels': 1, 'bitrate': 32}
})
self.assertTrue(verify_progress(conv))
conv = c.convert('test.aac', self.audio_file_path, {
'format': 'mp3',
'audio': {
'codec': 'mp3', 'channels': 1, 'bitrate': 32}
})
self.assertTrue(verify_progress(conv))
def test_converter_2pass(self):
c = Converter()
options = {
'format': 'ogg',
'audio': {'codec': 'vorbis', 'samplerate': 11025, 'channels': 1, 'bitrate': 16},
'video': {'codec': 'theora', 'bitrate': 128, 'width': 360, 'height': 200, 'fps': 15}
}
options_repr = repr(options)
conv = c.convert('test1.ogg', self.video_file_path, options, twopass=True)
verify_progress(conv)
# Convert should not change options dict
self.assertEqual(options_repr, repr(options))
self._assert_converted_video_file()
def test_converter_vp8_codec(self):
c = Converter()
conv = c.convert('test1.ogg', self.video_file_path, {
'format': 'webm',
'video': {
'codec': 'vp8', 'width': 160, 'height': 120, 'fps': 15, 'bitrate': 300},
'audio': {
'codec': 'vorbis', 'channels': 1, 'bitrate': 32}
})
self.assertTrue(verify_progress(conv))
def test_probe_audio_poster(self):
c = Converter()
info = c.probe('test.mp3', posters_as_video=True)
self.assertNotEqual(None, info.video)
self.assertEqual(info.video.attached_pic, 1)
info = c.probe('test.mp3', posters_as_video=False)
self.assertEqual(None, info.video)
self.assertEqual(len(info.posters), 1)
poster = info.posters[0]
self.assertEqual(poster.type, 'video')
self.assertEqual(poster.codec, 'png')
self.assertEqual(poster.video_width, 32)
self.assertEqual(poster.video_height, 32)
self.assertEqual(poster.attached_pic, 1)
    def test_concat(self):
        """Concatenate two copies of the sample video and verify conversion progress."""
        c = Converter()
        conv = c.concat(['test1.ogg', "test1.ogg"], self.video_file_path, {
            'format': 'ogg',
            'video': {
                'codec': 'theora', 'width': 160, 'height': 120, 'fps': 15, 'bitrate': 300},
            'audio': {
                'codec': 'vorbis', 'channels': 1, 'bitrate': 32}
        }, temp_dir=self.temp_dir)
        self.assertTrue(verify_progress(conv))
def test_concat_with_option_per_file(self):
c = Converter()
conv = c.concat([('test1.ogg', ['-vf', 'transpose=2']), ("test1.ogg", ["-ss", "00:00:05", "-to", "00:00:25"])],
self.video_file_path, {
'format': 'ogg',
'video': {
'codec': 'theora', 'width': 160, 'height': 120, 'fps': 15, 'bitrate': 300},
'audio': {
'codec': 'vorbis', 'channels': 1, 'bitrate': 32}
}, temp_dir=self.temp_dir)
self.assertTrue(verify_progress(conv))
pass
    def test_convert_with_additional_option(self):
        # NOTE(review): despite the name, no additional option is passed here —
        # the options dict is identical to the plain conversion test. Possibly a stub;
        # confirm intent before extending.
        c = Converter()
        conv = c.convert('test1.ogg', self.video_file_path, {
            'format': 'ogg',
            'video': {
                'codec': 'theora', 'width': 160, 'height': 120, 'fps': 15, 'bitrate': 300},
            'audio': {
                'codec': 'vorbis', 'channels': 1, 'bitrate': 32}
        })
        self.assertTrue(verify_progress(conv))
def test_probe_image(self):
c = Converter()
info = c.probe('test.png', posters_as_video=True)
self.assertEqual(info.video.codec, 'png')
info = c.probe('test.jpg', posters_as_video=False)
self.assertEqual(info.video.codec, 'mjpeg')
    def test_add_audio(self):
        """Mux an extra MP3 audio track into the sample video via extra ffmpeg params."""
        c = Converter()
        i = c.convert('test1.ogg', '/tmp/audio.ogg',
                      options={
                          'format': 'mov',
                          # Second input plus stream mapping: video from input 0, audio from input 1
                          'params': ['-i', 'test.mp3', '-map', '0:0', '-map', '1'],
                          'video': {
                              'codec': 'copy',
                          },
                          'audio': {
                              'codec': 'mp3',
                              'bitrate': 128 * 1024,
                              'channels': 1,
                          }
                      }
                      )
        # Exhaust the generator so the conversion actually runs
        list(i)
    def test_color_correction(self):
        """Apply color-correction filter options (identity/neutral values) to a PNG."""
        c = Converter()
        i = c.convert('test.png', '/tmp/test.png',
                      options={
                          'format': 'image2',
                          'video': {
                              'codec': 'png',
                              # Neutral values: output should be visually unchanged
                              'brightness': 0.,
                              'contrast': 1.0,
                              'hue': 0,
                              'saturation': 1.0,
                              'unsharp': 0.0,
                          }
                      }
                      )
        # Exhaust the generator so the conversion actually runs
        list(i)
# Run the test suite when executed as a script
if __name__ == '__main__':
    unittest.main()
|
<filename>test/test_converter.py
#!/usr/bin/env python
# modify the path so that parent directory is in it
import sys
sys.path.append('../')
import random
import string
import shutil
import unittest
import os
from os.path import join as pjoin
from converter import ffmpeg, formats, avcodecs, Converter, ConverterError
def verify_progress(p):
    """Validate a conversion progress iterable.

    Consumes *p* and returns True only if it yielded at least one value and
    every value is a non-decreasing int percentage in the 0-100 range.
    Returns False for a falsy *p* (e.g. None) or an empty sequence.
    """
    if not p:
        return False
    values = list(p)
    if not values:
        return False
    prev = 0
    for value in values:
        # bool is a subclass of int, so compare the exact type on purpose
        # (identity comparison instead of the original '!=' for clarity).
        if type(value) is not int or not 0 <= value <= 100:
            return False
        if value < prev:
            return False
        prev = value
    return True
class TestFFMpeg(unittest.TestCase):
    """Integration tests for the converter package against real ffmpeg/ffprobe binaries.

    Each test runs inside a throwaway temp directory created in setUp() and
    removed in tearDown(). The tests read sample media files (test1.ogg,
    test.aac, test.mp3, test.png, test.jpg) from the current working directory.
    """
    def setUp(self):
        # Unique random temp dir per test so repeated runs don't collide
        current_dir = os.path.abspath(os.path.dirname(__file__))
        temp_name = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(20))
        self.temp_dir = pjoin(current_dir, temp_name)
        if not os.path.exists(self.temp_dir):
            os.makedirs(self.temp_dir)
        self.video_file_path = pjoin(self.temp_dir, 'output.ogg')
        self.audio_file_path = pjoin(self.temp_dir, 'output.mp3')
        self.shot_file_path = pjoin(self.temp_dir, 'shot.png')
        self.shot2_file_path = pjoin(self.temp_dir, 'shot2.png')
        self.shot3_file_path = pjoin(self.temp_dir, 'shot3.png')
    def tearDown(self):
        shutil.rmtree(self.temp_dir)
    def assertRaisesSpecific(self, exception, fn, *args, **kwargs):
        """Assert that fn(*args, **kwargs) raises 'exception'; return the raised instance."""
        try:
            fn(*args, **kwargs)
            raise Exception('Expected exception %s not raised' % repr(exception))
        except exception:
            ex = sys.exc_info()[1]
            return ex
    @staticmethod
    def ensure_notexist(f):
        # Delete file f if present, so a test can assert it gets (re)created
        if os.path.exists(f):
            os.unlink(f)
    def test_ffmpeg_probe(self):
        """Probe test1.ogg and verify all format/stream metadata, including repr()."""
        # Invalid ffmpeg/ffprobe paths must fail fast at construction time
        self.assertRaisesSpecific(ffmpeg.FFMpegError, ffmpeg.FFMpeg,
            ffmpeg_path='/foo', ffprobe_path='/bar')
        f = ffmpeg.FFMpeg()
        self.assertEqual(None, f.probe('nonexistent'))
        self.assertEqual(None, f.probe('/dev/null'))
        info = f.probe('test1.ogg')
        self.assertEqual('ogg', info.format.format)
        self.assertAlmostEqual(33.00, info.format.duration, places=2)
        self.assertEqual(2, len(info.streams))
        v = info.streams[0]
        self.assertEqual(v, info.video)
        self.assertEqual('video', v.type)
        self.assertEqual('theora', v.codec)
        self.assertEqual(720, v.video_width)
        self.assertEqual(400, v.video_height)
        self.assertEqual(None, v.bitrate)
        self.assertAlmostEqual(25.00, v.video_fps, places=2)
        self.assertEqual(v.metadata['ENCODER'], 'ffmpeg2theora 0.19')
        a = info.streams[1]
        self.assertEqual(a, info.audio)
        self.assertEqual('audio', a.type)
        self.assertEqual('vorbis', a.codec)
        self.assertEqual(2, a.audio_channels)
        self.assertEqual(80000, a.bitrate)
        self.assertEqual(48000, a.audio_samplerate)
        self.assertEqual(a.metadata['ENCODER'], 'ffmpeg2theora 0.19')
        self.assertEqual(repr(info), 'MediaInfo(format='
            'MediaFormatInfo(format=ogg, duration=33.00), streams=['
            'MediaStreamInfo(type=video, codec=theora, width=720, '
            'height=400, fps=25.0, ENCODER=ffmpeg2theora 0.19), '
            'MediaStreamInfo(type=audio, codec=vorbis, channels=2, rate=48000, '
            'bitrate=80000, ENCODER=ffmpeg2theora 0.19)])')
    def test_ffmpeg_convert(self):
        """Low-level FFMpeg.convert(): error cases, timecode progress, and output file."""
        f = ffmpeg.FFMpeg()
        def consume(fn, *args, **kwargs):
            # Exhaust the conversion generator so errors actually surface
            return list(fn(*args, **kwargs))
        self.assertRaisesSpecific(ffmpeg.FFMpegError, consume,
            f.convert, 'nonexistent', self.video_file_path, [])
        self.assertRaisesSpecific(ffmpeg.FFMpegConvertError, consume,
            f.convert, '/etc/passwd', self.video_file_path, [])
        info = f.probe('test1.ogg')
        convert_options = [
            '-acodec', 'libvorbis', '-ab', '16k', '-ac', '1', '-ar', '11025',
            '-vcodec', 'libtheora', '-r', '15', '-s', '360x200', '-b', '128k']
        conv = f.convert('test1.ogg', self.video_file_path, convert_options)
        last_tc = 0.0
        for tc in conv:
            # NOTE(review): last_tc is never advanced inside the loop, so this only
            # checks tc > 0.0, not monotonicity — consider adding 'last_tc = tc'.
            assert (last_tc < tc <= info.format.duration + 0.1), (last_tc, tc, info.format.duration)
        self._assert_converted_video_file()
    def _assert_converted_video_file(self):
        """
        Asserts converted test1.ogg (in path self.video_file_path) is converted correctly
        """
        f = ffmpeg.FFMpeg()
        info = f.probe(self.video_file_path)
        self.assertEqual('ogg', info.format.format)
        self.assertAlmostEqual(33.00, info.format.duration, places=0)
        self.assertEqual(2, len(info.streams))
        self.assertEqual('video', info.video.type)
        self.assertEqual('theora', info.video.codec)
        self.assertEqual(360, info.video.video_width)
        self.assertEqual(200, info.video.video_height)
        self.assertAlmostEqual(15.00, info.video.video_fps, places=2)
        self.assertEqual('audio', info.audio.type)
        self.assertEqual('vorbis', info.audio.codec)
        self.assertEqual(1, info.audio.audio_channels)
        self.assertEqual(11025, info.audio.audio_samplerate)
    def test_ffmpeg_termination(self):
        # test when ffmpeg is killed
        f = ffmpeg.FFMpeg()
        convert_options = [
            '-acodec', 'libvorbis', '-ab', '16k', '-ac', '1', '-ar', '11025',
            '-vcodec', 'libtheora', '-r', '15', '-s', '360x200', '-b', '128k']
        p_list = {} # modifiable object in closure
        # Intercept _spawn so we can get a handle on the subprocess and kill it
        f._spawn = lambda *args: p_list.setdefault('', ffmpeg.FFMpeg._spawn(*args))
        conv = f.convert('test1.ogg', self.video_file_path, convert_options)
        next(conv) # let ffmpeg to start
        p = p_list['']
        p.terminate()
        # Killing the subprocess mid-conversion must surface as FFMpegConvertError
        self.assertRaisesSpecific(ffmpeg.FFMpegConvertError, list, conv)
    def test_converter(self):
        """High-level Converter: option validation, probing, thumbnailing and converting."""
        c = Converter()
        # Each of these option dicts is incomplete or invalid and must be rejected
        self.assertRaisesSpecific(ConverterError, c.parse_options, None)
        self.assertRaisesSpecific(ConverterError, c.parse_options, {})
        self.assertRaisesSpecific(ConverterError, c.parse_options, {'format': 'foo'})
        self.assertRaisesSpecific(ConverterError, c.parse_options, {'format': 'ogg'})
        self.assertRaisesSpecific(ConverterError, c.parse_options, {'format': 'ogg', 'video': 'whatever'})
        self.assertRaisesSpecific(ConverterError, c.parse_options, {'format': 'ogg', 'audio': {}})
        self.assertRaisesSpecific(ConverterError, c.parse_options,
            {'format': 'ogg', 'audio': {'codec': 'bogus'}})
        self.assertEqual(['-an', '-vcodec', 'libtheora', '-r', '25', '-sn', '-f', 'ogg'],
            c.parse_options({'format': 'ogg', 'video': {'codec': 'theora', 'fps': 25}}))
        self.assertEqual(['-acodec', 'copy', '-vcodec', 'copy', '-sn', '-f', 'ogg'],
            c.parse_options(
                {'format': 'ogg', 'audio': {'codec': 'copy'}, 'video': {'codec': 'copy'}, 'subtitle': {'codec': None}}))
        info = c.probe('test1.ogg')
        self.assertEqual('theora', info.video.codec)
        self.assertEqual(720, info.video.video_width)
        self.assertEqual(400, info.video.video_height)
        f = self.shot_file_path
        self.ensure_notexist(f)
        # Thumbnail at 10 s must create the target file
        c.thumbnail('test1.ogg', 10, f)
        self.assertTrue(os.path.exists(f))
        os.unlink(f)
        conv = c.convert('test1.ogg', self.video_file_path, {
            'format': 'ogg',
            'video': {
                'codec': 'theora', 'width': 160, 'height': 120, 'fps': 15, 'bitrate': 300},
            'audio': {
                'codec': 'vorbis', 'channels': 1, 'bitrate': 32}
        })
        self.assertTrue(verify_progress(conv))
        conv = c.convert('test.aac', self.audio_file_path, {
            'format': 'mp3',
            'audio': {
                'codec': 'mp3', 'channels': 1, 'bitrate': 32}
        })
        self.assertTrue(verify_progress(conv))
    def test_converter_2pass(self):
        # Two-pass encoding must produce the same verified output file and
        # must not mutate the caller's options dict.
        c = Converter()
        options = {
            'format': 'ogg',
            'audio': {'codec': 'vorbis', 'samplerate': 11025, 'channels': 1, 'bitrate': 16},
            'video': {'codec': 'theora', 'bitrate': 128, 'width': 360, 'height': 200, 'fps': 15}
        }
        options_repr = repr(options)
        conv = c.convert('test1.ogg', self.video_file_path, options, twopass=True)
        verify_progress(conv)
        # Convert should not change options dict
        self.assertEqual(options_repr, repr(options))
        self._assert_converted_video_file()
    def test_converter_vp8_codec(self):
        """Convert the sample video to WebM (VP8 + Vorbis) and check progress."""
        c = Converter()
        conv = c.convert('test1.ogg', self.video_file_path, {
            'format': 'webm',
            'video': {
                'codec': 'vp8', 'width': 160, 'height': 120, 'fps': 15, 'bitrate': 300},
            'audio': {
                'codec': 'vorbis', 'channels': 1, 'bitrate': 32}
        })
        self.assertTrue(verify_progress(conv))
    def test_probe_audio_poster(self):
        """Probe an MP3 with embedded cover art, with and without posters_as_video."""
        c = Converter()
        info = c.probe('test.mp3', posters_as_video=True)
        self.assertNotEqual(None, info.video)
        self.assertEqual(info.video.attached_pic, 1)
        info = c.probe('test.mp3', posters_as_video=False)
        self.assertEqual(None, info.video)
        self.assertEqual(len(info.posters), 1)
        poster = info.posters[0]
        self.assertEqual(poster.type, 'video')
        self.assertEqual(poster.codec, 'png')
        self.assertEqual(poster.video_width, 32)
        self.assertEqual(poster.video_height, 32)
        self.assertEqual(poster.attached_pic, 1)
    def test_concat(self):
        """Concatenate two copies of the sample video and verify conversion progress."""
        c = Converter()
        conv = c.concat(['test1.ogg', "test1.ogg"], self.video_file_path, {
            'format': 'ogg',
            'video': {
                'codec': 'theora', 'width': 160, 'height': 120, 'fps': 15, 'bitrate': 300},
            'audio': {
                'codec': 'vorbis', 'channels': 1, 'bitrate': 32}
        }, temp_dir=self.temp_dir)
        self.assertTrue(verify_progress(conv))
    def test_concat_with_option_per_file(self):
        """Concatenate two inputs, each with its own per-input ffmpeg options."""
        c = Converter()
        conv = c.concat([('test1.ogg', ['-vf', 'transpose=2']), ("test1.ogg", ["-ss", "00:00:05", "-to", "00:00:25"])],
                        self.video_file_path, {
                            'format': 'ogg',
                            'video': {
                                'codec': 'theora', 'width': 160, 'height': 120, 'fps': 15, 'bitrate': 300},
                            'audio': {
                                'codec': 'vorbis', 'channels': 1, 'bitrate': 32}
                        }, temp_dir=self.temp_dir)
        self.assertTrue(verify_progress(conv))
        # NOTE(review): stray no-op 'pass' below — dead code, safe to remove
        pass
    def test_convert_with_additional_option(self):
        # NOTE(review): despite the name, no additional option is passed here —
        # the options dict is identical to the plain conversion test. Possibly a stub.
        c = Converter()
        conv = c.convert('test1.ogg', self.video_file_path, {
            'format': 'ogg',
            'video': {
                'codec': 'theora', 'width': 160, 'height': 120, 'fps': 15, 'bitrate': 300},
            'audio': {
                'codec': 'vorbis', 'channels': 1, 'bitrate': 32}
        })
        self.assertTrue(verify_progress(conv))
    def test_probe_image(self):
        """Probe still images; PNG and JPEG are reported with their video codecs."""
        c = Converter()
        info = c.probe('test.png', posters_as_video=True)
        self.assertEqual(info.video.codec, 'png')
        info = c.probe('test.jpg', posters_as_video=False)
        self.assertEqual(info.video.codec, 'mjpeg')
    def test_add_audio(self):
        """Mux an extra MP3 audio track into the sample video via extra ffmpeg params."""
        c = Converter()
        i = c.convert('test1.ogg', '/tmp/audio.ogg',
                      options={
                          'format': 'mov',
                          # Second input plus stream mapping: video from input 0, audio from input 1
                          'params': ['-i', 'test.mp3', '-map', '0:0', '-map', '1'],
                          'video': {
                              'codec': 'copy',
                          },
                          'audio': {
                              'codec': 'mp3',
                              'bitrate': 128 * 1024,
                              'channels': 1,
                          }
                      }
                      )
        # Exhaust the generator so the conversion actually runs
        list(i)
    def test_color_correction(self):
        """Apply color-correction filter options (identity/neutral values) to a PNG."""
        c = Converter()
        i = c.convert('test.png', '/tmp/test.png',
                      options={
                          'format': 'image2',
                          'video': {
                              'codec': 'png',
                              # Neutral values: output should be visually unchanged
                              'brightness': 0.,
                              'contrast': 1.0,
                              'hue': 0,
                              'saturation': 1.0,
                              'unsharp': 0.0,
                          }
                      }
                      )
        # Exhaust the generator so the conversion actually runs
        list(i)
# Run the test suite when executed as a script
if __name__ == '__main__':
    unittest.main()
|
en
| 0.729901
|
#!/usr/bin/env python # modify the path so that parent directory is in it Asserts converted test1.ogg (in path self.video_file_path) is converted correctly # test when ffmpeg is killed # modifiable object in closure # let ffmpeg to start # Convert should not change options dict
| 2.652674
| 3
|
exchangelib/services/common.py
|
tkurze/exchangelib
| 0
|
6625607
|
import abc
import logging
import traceback
from itertools import chain
from .. import errors
from ..attachments import AttachmentId
from ..credentials import IMPERSONATION, OAuth2Credentials
from ..errors import EWSWarning, TransportError, SOAPError, ErrorTimeoutExpired, ErrorBatchProcessingStopped, \
ErrorQuotaExceeded, ErrorCannotDeleteObject, ErrorCreateItemAccessDenied, ErrorFolderNotFound, \
ErrorNonExistentMailbox, ErrorMailboxStoreUnavailable, ErrorImpersonateUserDenied, ErrorInternalServerError, \
ErrorInternalServerTransientError, ErrorNoRespondingCASInDestinationSite, ErrorImpersonationFailed, \
ErrorMailboxMoveInProgress, ErrorAccessDenied, ErrorConnectionFailed, RateLimitError, ErrorServerBusy, \
ErrorTooManyObjectsOpened, ErrorInvalidLicense, ErrorInvalidSchemaVersionForMailboxVersion, \
ErrorInvalidServerVersion, ErrorItemNotFound, ErrorADUnavailable, ErrorInvalidChangeKey, \
ErrorItemSave, ErrorInvalidIdMalformed, ErrorMessageSizeExceeded, UnauthorizedError, \
ErrorCannotDeleteTaskOccurrence, ErrorMimeContentConversionFailed, ErrorRecurrenceHasNoOccurrence, \
ErrorNoPublicFolderReplicaAvailable, MalformedResponseError, ErrorExceededConnectionCount, \
SessionPoolMinSizeReached, ErrorIncorrectSchemaVersion, ErrorInvalidRequest, ErrorCorruptData, \
ErrorCannotEmptyFolder, ErrorDeleteDistinguishedFolder, ErrorInvalidSubscription, ErrorInvalidWatermark, \
ErrorInvalidSyncStateData, ErrorNameResolutionNoResults, ErrorNameResolutionMultipleResults, \
ErrorConnectionFailedTransientError, ErrorDelegateNoUser, ErrorNotDelegate, InvalidTypeError
from ..folders import BaseFolder, Folder, RootOfHierarchy
from ..items import BaseItem
from ..properties import FieldURI, IndexedFieldURI, ExtendedFieldURI, ExceptionFieldURI, ItemId, FolderId, \
DistinguishedFolderId, BaseItemId
from ..transport import wrap
from ..util import chunkify, create_element, add_xml_child, get_xml_attr, to_xml, post_ratelimited, \
xml_to_str, set_xml_value, SOAPNS, TNS, MNS, ENS, ParseError, DummyResponse
from ..version import API_VERSIONS, Version
# Module-level logger shared by all EWS services
log = logging.getLogger(__name__)
PAGE_SIZE = 100 # A default page size for all paging services. This is the number of items we request per page
CHUNK_SIZE = 100 # A default chunk size for all services. This is the number of items we send in a single request
# Exceptions that are understood failure modes of an EWS call. They are re-raised
# to the caller without a logged backtrace (see EWSService._get_elements).
KNOWN_EXCEPTIONS = (
    ErrorAccessDenied,
    ErrorADUnavailable,
    ErrorBatchProcessingStopped,
    ErrorCannotDeleteObject,
    ErrorCannotEmptyFolder,
    ErrorConnectionFailed,
    ErrorConnectionFailedTransientError,
    ErrorCreateItemAccessDenied,
    ErrorDelegateNoUser,
    ErrorDeleteDistinguishedFolder,
    ErrorExceededConnectionCount,
    ErrorFolderNotFound,
    ErrorImpersonateUserDenied,
    ErrorImpersonationFailed,
    ErrorInternalServerError,
    ErrorInternalServerTransientError,
    ErrorInvalidChangeKey,
    ErrorInvalidLicense,
    ErrorInvalidSubscription,
    ErrorInvalidSyncStateData,
    ErrorInvalidWatermark,
    ErrorItemNotFound,
    ErrorMailboxMoveInProgress,
    ErrorMailboxStoreUnavailable,
    ErrorNameResolutionMultipleResults,
    ErrorNameResolutionNoResults,
    ErrorNonExistentMailbox,
    ErrorNoPublicFolderReplicaAvailable,
    ErrorNoRespondingCASInDestinationSite,
    ErrorNotDelegate,
    ErrorQuotaExceeded,
    ErrorTimeoutExpired,
    RateLimitError,
    UnauthorizedError,
)
class EWSService(metaclass=abc.ABCMeta):
    """Base class for all EWS services.

    Subclasses describe one SOAP service each: they override the class
    attributes below and implement .call() and .get_payload() (see the
    commented-out signatures after __init__).
    """
    SERVICE_NAME = None # The name of the SOAP service
    element_container_name = None # The name of the XML element wrapping the collection of returned items
    paging_container_name = None # The name of the element that contains paging information and the paged results
    returns_elements = True # If False, the service does not return response elements, just the ResponseCode status
    # Return exception instance instead of raising exceptions for the following errors when contained in an element
    ERRORS_TO_CATCH_IN_RESPONSE = (
        EWSWarning, ErrorCannotDeleteObject, ErrorInvalidChangeKey, ErrorItemNotFound, ErrorItemSave,
        ErrorInvalidIdMalformed, ErrorMessageSizeExceeded, ErrorCannotDeleteTaskOccurrence,
        ErrorMimeContentConversionFailed, ErrorRecurrenceHasNoOccurrence, ErrorCorruptData
    )
    # Similarly, define the warnings we want to return unraised
    WARNINGS_TO_CATCH_IN_RESPONSE = ErrorBatchProcessingStopped
    # Define the warnings we want to ignore, to let response processing proceed
    WARNINGS_TO_IGNORE_IN_RESPONSE = ()
    # The exception type to raise when all attempted API versions failed
    NO_VALID_SERVER_VERSIONS = ErrorInvalidServerVersion
    # Marks the version from which the service was introduced
    supported_from = None
    # Marks services that support paging of requested items
    supports_paging = False
    def __init__(self, protocol, chunk_size=None, timeout=None):
        """
        :param protocol: The Protocol object to send requests through
        :param chunk_size: The number of items to send per request. Defaults to CHUNK_SIZE
        :param timeout: Optional override of the protocol's request timeout. Useful for streaming services
        """
        self.chunk_size = chunk_size or CHUNK_SIZE
        if not isinstance(self.chunk_size, int):
            raise InvalidTypeError('chunk_size', chunk_size, int)
        if self.chunk_size < 1:
            raise ValueError(f"'chunk_size' {self.chunk_size} must be a positive number")
        # Refuse to use services that the server's Exchange version does not provide
        if self.supported_from and protocol.version.build < self.supported_from:
            raise NotImplementedError(
                f'{self.SERVICE_NAME!r} is only supported on {self.supported_from.fullname()!r} and later. '
                f'Your current version is {protocol.version.build.fullname()!r}.'
            )
        self.protocol = protocol
        # Allow a service to override the default protocol timeout. Useful for streaming services
        self.timeout = timeout
        # Controls whether the HTTP request should be streaming or fetch everything at once
        self.streaming = False
        # Streaming connection variables
        self._streaming_session = None
        self._streaming_response = None
    # The following two methods are the minimum required to be implemented by subclasses, but the name and number of
    # kwargs differs between services. Therefore, we cannot make these methods abstract.
    # @abc.abstractmethod
    # def call(self, **kwargs):
    #     """Defines the arguments required by the service. Arguments are basic Python types or EWSElement objects.
    #     Returns either XML objects or EWSElement objects.
    #     """"
    #     pass
    # @abc.abstractmethod
    # def get_payload(self, **kwargs):
    #     """Using the arguments from .call(), return the payload expected by the service, as an XML object. The XML
    #     object should consist of a SERVICE_NAME element and everything within that.
    #     """
    #     pass
def get(self, expect_result=True, **kwargs):
"""Like .call(), but expects exactly one result from the server, or zero when expect_result=False, or either
zero or one when expect_result=None. Returns either one object or None.
:param expect_result: None, True, or False
:param kwargs: Same as arguments for .call()
:return: Same as .call(), but returns either None or exactly one item
"""
res = list(self.call(**kwargs))
# Raise any errors
for r in res:
if isinstance(r, Exception):
raise r
if expect_result is None and not res:
# Allow empty result
return None
if expect_result is False:
if res:
raise ValueError(f'Expected result length 0, but got {res}')
return None
if len(res) != 1:
raise ValueError(f'Expected result length 1, but got {res}')
return res[0]
    def parse(self, xml):
        """Used mostly for testing, when we want to parse static XML data.

        :param xml: Raw bytes of a full SOAP response
        :return: A generator of parsed response objects (or exception instances)
        """
        resp = DummyResponse(content=xml, streaming=self.streaming)
        _, body = self._get_soap_parts(response=resp)
        return self._elems_to_objs(self._get_elements_in_response(response=self._get_soap_messages(body=body)))
def _elems_to_objs(self, elems):
"""Takes a generator of XML elements and exceptions. Returns the equivalent Python objects (or exceptions)."""
for elem in elems:
# Allow None here. Some services don't return an ID if the target folder is outside the mailbox.
if isinstance(elem, (Exception, type(None))):
yield elem
continue
yield self._elem_to_obj(elem)
    def _elem_to_obj(self, elem):
        """Convert a single XML element to its Python object. Subclasses with
        returns_elements=True must override this.
        """
        if not self.returns_elements:
            raise RuntimeError("Incorrect call to method when 'returns_elements' is False")
        raise NotImplementedError()
    @property
    def _version_hint(self):
        """The currently best-known server version, shared via the protocol config."""
        # We may be here due to version guessing in Protocol.version, so we can't use the self.protocol.version property
        return self.protocol.config.version
    @_version_hint.setter
    def _version_hint(self, value):
        # Persist an improved version guess on the shared config object
        self.protocol.config.version = value
    def _extra_headers(self, session):
        """Extra HTTP headers to send with the request. No-op by default; subclasses may override."""
        return {}
    @property
    def _account_to_impersonate(self):
        # With OAuth credentials, the identity attached to the credentials is the account we act on behalf of
        if isinstance(self.protocol.credentials, OAuth2Credentials):
            return self.protocol.credentials.identity
        return None
    @property
    def _timezone(self):
        # No timezone context by default; subclasses may override
        return None
    def _response_generator(self, payload):
        """Send the payload to the server, and return the response.

        :param payload: payload as an XML object
        :return: the response, as XML objects
        """
        response = self._get_response_xml(payload=payload)
        if self.supports_paging:
            # Paging services post-process each response message via _get_page()
            # (provided by the paging machinery elsewhere in this module)
            return (self._get_page(message) for message in response)
        return self._get_elements_in_response(response=response)
def _chunked_get_elements(self, payload_func, items, **kwargs):
"""Yield elements in a response. Like ._get_elements(), but chop items into suitable chunks and send multiple
requests.
:param payload_func: A reference to .payload()
:param items: An iterable of items (messages, folders, etc.) to process
:param kwargs: Same as arguments for .call(), except for the 'items' argument
:return: Same as ._get_elements()
"""
# If the input for a service is a QuerySet, it can be difficult to remove exceptions before now
filtered_items = filter(lambda i: not isinstance(i, Exception), items)
for i, chunk in enumerate(chunkify(filtered_items, self.chunk_size), start=1):
log.debug('Processing chunk %s containing %s items', i, len(chunk))
yield from self._get_elements(payload=payload_func(chunk, **kwargs))
    def stop_streaming(self):
        """Close the streaming HTTP response and return its session to the pool."""
        if not self.streaming:
            raise RuntimeError('Attempt to stop a non-streaming service')
        if self._streaming_response:
            self._streaming_response.close() # Release memory
            self._streaming_response = None
        if self._streaming_session:
            self.protocol.release_session(self._streaming_session)
            self._streaming_session = None
    def _get_elements(self, payload):
        """Send the payload to be sent and parsed. Handles and re-raise exceptions that are not meant to be returned
        to the caller as exception objects. Retry the request according to the retry policy.

        :param payload: The request payload, as an XML object
        :return: A generator of response elements (or exception instances)
        """
        while True:
            try:
                # Create a generator over the response elements so exceptions in response elements are also raised
                # here and can be handled.
                yield from self._response_generator(payload=payload)
                return
            except ErrorServerBusy as e:
                # Back off as instructed by the server, then retry the request
                self._handle_backoff(e)
                continue
            except KNOWN_EXCEPTIONS:
                # These are known and understood, and don't require a backtrace.
                raise
            except (ErrorTooManyObjectsOpened, ErrorTimeoutExpired) as e:
                # ErrorTooManyObjectsOpened means there are too many connections to the Exchange database. This is very
                # often a symptom of sending too many requests.
                #
                # ErrorTimeoutExpired can be caused by a busy server, or by overly large requests. Start by lowering the
                # session count. This is done by downstream code.
                if isinstance(e, ErrorTimeoutExpired) and self.protocol.session_pool_size <= 1:
                    # We're already as low as we can go, so downstream cannot limit the session count to put less load
                    # on the server. We don't have a way of lowering the page size of requests from
                    # this part of the code yet. Let the user handle this.
                    raise e
                # Re-raise as an ErrorServerBusy with a default delay of 5 minutes
                raise ErrorServerBusy(f'Reraised from {e.__class__.__name__}({e})')
            except Exception:
                # This may run in a thread, which obfuscates the stack trace. Print trace immediately.
                account = self.account if isinstance(self, EWSAccountService) else None
                log.warning('Account %s: Exception in _get_elements: %s', account, traceback.format_exc(20))
                raise
            finally:
                # Streaming services must always release their session/response when done
                if self.streaming:
                    self.stop_streaming()
    def _handle_response_cookies(self, session):
        """React to cookies set on the session by the server. No-op by default;
        subclasses may override.
        """
        pass
    def _get_response(self, payload, api_version):
        """Send the actual HTTP request and get the response.

        :param payload: The request payload, as an XML object
        :param api_version: The API version string to advertise in the SOAP header
        :return: The HTTP response object
        """
        session = self.protocol.get_session()
        if self.streaming:
            # Make sure to clean up lingering resources
            self.stop_streaming()
        r, session = post_ratelimited(
            protocol=self.protocol,
            session=session,
            url=self.protocol.service_endpoint,
            headers=self._extra_headers(session),
            data=wrap(
                content=payload,
                api_version=api_version,
                account_to_impersonate=self._account_to_impersonate,
                timezone=self._timezone,
            ),
            allow_redirects=False,
            stream=self.streaming,
            timeout=self.timeout or self.protocol.TIMEOUT,
        )
        self._handle_response_cookies(session)
        if self.streaming:
            # We can only release the session when we have fully consumed the response. Save session and response
            # objects for later.
            self._streaming_session, self._streaming_response = session, r
        else:
            self.protocol.release_session(session)
        return r
@property
def _api_versions_to_try(self):
# Put the hint first in the list, and then all other versions except the hint, from newest to oldest
return (self._version_hint.api_version,) + tuple(v for v in API_VERSIONS if v != self._version_hint.api_version)
    def _get_response_xml(self, payload, **parse_opts):
        """Send the payload to the server and return relevant elements from the result. Several things happen here:
        * The payload is wrapped in SOAP headers and sent to the server
        * The Exchange API version is negotiated and stored in the protocol object
        * Connection errors are handled and possibly reraised as ErrorServerBusy
        * SOAP errors are raised
        * EWS errors are raised, or passed on to the caller

        :param payload: The request payload, as an XML object
        :param parse_opts: Extra options passed through to the SOAP parsing helpers
        :return: A generator of XML objects or None if the service does not return a result
        """
        # Microsoft really doesn't want to make our lives easy. The server may report one version in our initial version
        # guessing tango, but then the server may decide that any arbitrary legacy backend server may actually process
        # the request for an account. Prepare to handle version-related errors and set the server version per-account.
        log.debug('Calling service %s', self.SERVICE_NAME)
        for api_version in self._api_versions_to_try:
            log.debug('Trying API version %s', api_version)
            r = self._get_response(payload=payload, api_version=api_version)
            if self.streaming:
                # Let 'requests' decode raw data automatically
                r.raw.decode_content = True
            try:
                header, body = self._get_soap_parts(response=r, **parse_opts)
            except Exception:
                r.close() # Release memory
                raise
            # The body may contain error messages from Exchange, but we still want to collect version info
            if header is not None:
                self._update_api_version(api_version=api_version, header=header, **parse_opts)
            try:
                return self._get_soap_messages(body=body, **parse_opts)
            except (ErrorInvalidServerVersion, ErrorIncorrectSchemaVersion, ErrorInvalidRequest,
                    ErrorInvalidSchemaVersionForMailboxVersion):
                # The guessed server version is wrong. Try the next version
                log.debug('API version %s was invalid', api_version)
                continue
            except ErrorExceededConnectionCount as e:
                # This indicates that the connecting user has too many open TCP connections to the server. Decrease
                # our session pool size.
                try:
                    self.protocol.decrease_poolsize()
                    continue
                except SessionPoolMinSizeReached:
                    # We're already as low as we can go. Let the user handle this.
                    raise e
            finally:
                if not self.streaming:
                    # In streaming mode, we may not have accessed the raw stream yet. Caller must handle this.
                    r.close() # Release memory
        raise self.NO_VALID_SERVER_VERSIONS(f'Tried versions {self._api_versions_to_try} but all were invalid')
    def _handle_backoff(self, e):
        """Take a request from the server to back off and checks the retry policy for what to do. Re-raise the
        exception if conditions are not met.

        :param e: An ErrorServerBusy instance
        :raises ErrorServerBusy: if the retry policy is fail-fast
        :return:
        """
        log.debug('Got ErrorServerBusy (back off %s seconds)', e.back_off)
        # ErrorServerBusy is very often a symptom of sending too many requests. Scale back connections if possible.
        try:
            self.protocol.decrease_poolsize()
        except SessionPoolMinSizeReached:
            # The pool is already at its minimum size; nothing more to scale back
            pass
        if self.protocol.retry_policy.fail_fast:
            raise e
        self.protocol.retry_policy.back_off(e.back_off)
        # We'll warn about this later if we actually need to sleep
    def _update_api_version(self, api_version, header, **parse_opts):
        """Parse the server version contained in SOAP headers and update the version hint stored by the caller, if
        necessary.

        :param api_version: The API version string that produced this response
        :param header: The SOAP Header element of the response
        """
        try:
            head_version = Version.from_soap_header(requested_api_version=api_version, header=header)
        except TransportError as te:
            # Version info in the header was missing or unparsable; keep the current hint
            log.debug('Failed to update version info (%s)', te)
            return
        if self._version_hint == head_version:
            # Nothing to do
            return
        log.debug('Found new version (%s -> %s)', self._version_hint, head_version)
        # The api_version that worked was different than our hint, or we never got a build version. Store the working
        # version.
        self._version_hint = head_version
@classmethod
def _response_tag(cls):
"""Return the name of the element containing the service response."""
return f'{{{MNS}}}{cls.SERVICE_NAME}Response'
@staticmethod
def _response_messages_tag():
"""Return the name of the element containing service response messages."""
return f'{{{MNS}}}ResponseMessages'
@classmethod
def _response_message_tag(cls):
"""Return the name of the element of a single response message."""
return f'{{{MNS}}}{cls.SERVICE_NAME}ResponseMessage'
@classmethod
def _get_soap_parts(cls, response, **parse_opts):
"""Split the SOAP response into its headers an body elements."""
try:
root = to_xml(response.iter_content())
except ParseError as e:
raise SOAPError(f'Bad SOAP response: {e}')
header = root.find(f'{{{SOAPNS}}}Header')
if header is None:
# This is normal when the response contains SOAP-level errors
log.debug('No header in XML response')
body = root.find(f'{{{SOAPNS}}}Body')
if body is None:
raise MalformedResponseError('No Body element in SOAP response')
return header, body
    def _get_soap_messages(self, body, **parse_opts):
        """Return the elements in the response containing the response messages. Raises any SOAP exceptions.

        :param body: The SOAP Body element
        :return: A list of response message elements
        """
        response = body.find(self._response_tag())
        if response is None:
            # No service response element; this must be a SOAP-level fault
            fault = body.find(f'{{{SOAPNS}}}Fault')
            if fault is None:
                raise SOAPError(f'Unknown SOAP response (expected {self._response_tag()} or Fault): {xml_to_str(body)}')
            self._raise_soap_errors(fault=fault) # Will throw SOAPError or custom EWS error
        response_messages = response.find(self._response_messages_tag())
        if response_messages is None:
            # Result isn't delivered in a list of FooResponseMessages, but directly in the FooResponse. Consumers expect
            # a list, so return a list
            return [response]
        return response_messages.findall(self._response_message_tag())
    @classmethod
    def _raise_soap_errors(cls, fault):
        """Parse error messages contained in SOAP headers and raise as exceptions defined in this package.

        Always raises; falls back to a generic SOAPError when the fault code does
        not match a known exception class in the 'errors' module.
        """
        # Fault: See http://www.w3.org/TR/2000/NOTE-SOAP-20000508/#_Toc478383507
        fault_code = get_xml_attr(fault, 'faultcode')
        fault_string = get_xml_attr(fault, 'faultstring')
        fault_actor = get_xml_attr(fault, 'faultactor')
        detail = fault.find('detail')
        if detail is not None:
            code, msg = None, ''
            if detail.find(f'{{{ENS}}}ResponseCode') is not None:
                code = get_xml_attr(detail, f'{{{ENS}}}ResponseCode').strip()
            if detail.find(f'{{{ENS}}}Message') is not None:
                msg = get_xml_attr(detail, f'{{{ENS}}}Message').strip()
            msg_xml = detail.find(f'{{{TNS}}}MessageXml') # Crazy. Here, it's in the TNS namespace
            if code == 'ErrorServerBusy':
                back_off = None
                try:
                    value = msg_xml.find(f'{{{TNS}}}Value')
                    if value.get('Name') == 'BackOffMilliseconds':
                        back_off = int(value.text) / 1000.0 # Convert to seconds
                except (TypeError, AttributeError):
                    # MessageXml missing or malformed; proceed without a back-off value
                    pass
                raise ErrorServerBusy(msg, back_off=back_off)
            if code == 'ErrorSchemaValidation' and msg_xml is not None:
                # Enrich the message with the violation text and its location, if available
                line_number = get_xml_attr(msg_xml, f'{{{TNS}}}LineNumber')
                line_position = get_xml_attr(msg_xml, f'{{{TNS}}}LinePosition')
                violation = get_xml_attr(msg_xml, f'{{{TNS}}}Violation')
                if violation:
                    msg = f'{msg} {violation}'
                if line_number or line_position:
                    msg = f'{msg} (line: {line_number} position: {line_position})'
            try:
                raise vars(errors)[code](msg)
            except KeyError:
                # Unknown response code; fold the details into the final SOAPError below
                detail = f'{cls.SERVICE_NAME}: code: {code} msg: {msg} ({xml_to_str(detail)})'
        try:
            raise vars(errors)[fault_code](fault_string)
        except KeyError:
            pass
        raise SOAPError(f'SOAP error code: {fault_code} string: {fault_string} actor: {fault_actor} detail: {detail}')
def _get_element_container(self, message, name=None):
    """Return the XML element in a response element that contains the elements we want the service to return. For
    example, in a GetFolder response, 'message' is the GetFolderResponseMessage element, and we return the 'Folders'
    element:

    <m:GetFolderResponseMessage ResponseClass="Success">
      <m:ResponseCode>NoError</m:ResponseCode>
      <m:Folders>
        <t:Folder>
          <t:FolderId Id="AQApA=" ChangeKey="AQAAAB" />
          [...]
        </t:Folder>
      </m:Folders>
    </m:GetFolderResponseMessage>

    Some service responses don't have a containing element for the returned elements ('name' is None). In
    that case, we return the 'SomeServiceResponseMessage' element.

    If the response contains a warning or an error message, we raise the relevant exception, unless the error class
    is contained in WARNINGS_TO_CATCH_IN_RESPONSE or ERRORS_TO_CATCH_IN_RESPONSE, in which case we return the
    exception instance.

    :param message: a 'SomeServiceResponseMessage' XML element
    :param name: the namespaced tag of the container element, or None
    :return: the container element, True (success without elements), or an acceptable exception instance
    """
    # ResponseClass is an XML attribute of various SomeServiceResponseMessage elements: Possible values are:
    # Success, Warning, Error. See e.g.
    # https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/finditemresponsemessage
    response_class = message.get('ResponseClass')
    # ResponseCode, MessageText: See
    # https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/responsecode
    response_code = get_xml_attr(message, f'{{{MNS}}}ResponseCode')
    if response_class == 'Success' and response_code == 'NoError':
        if not name:
            return message
        container = message.find(name)
        if container is None:
            raise MalformedResponseError(f'No {name} elements in ResponseMessage ({xml_to_str(message)})')
        return container
    if response_code == 'NoError':
        # ResponseClass was not 'Success', but the server reports no error. Signal success without elements.
        return True
    # Raise any non-acceptable errors in the container, or return the container or the acceptable exception instance
    msg_text = get_xml_attr(message, f'{{{MNS}}}MessageText')
    msg_xml = message.find(f'{{{MNS}}}MessageXml')
    if response_class == 'Warning':
        try:
            raise self._get_exception(code=response_code, text=msg_text, msg_xml=msg_xml)
        except self.WARNINGS_TO_CATCH_IN_RESPONSE as e:
            return e
        except self.WARNINGS_TO_IGNORE_IN_RESPONSE as e:
            # Log and continue as if the service succeeded; return the container like the success path does
            log.warning(str(e))
            container = message.find(name)
            if container is None:
                raise MalformedResponseError(f'No {name} elements in ResponseMessage ({xml_to_str(message)})')
            return container
    # rspclass == 'Error', or 'Success' and not 'NoError'
    try:
        raise self._get_exception(code=response_code, text=msg_text, msg_xml=msg_xml)
    except self.ERRORS_TO_CATCH_IN_RESPONSE as e:
        return e
@staticmethod
def _get_exception(code, text, msg_xml):
    """Parse error messages contained in EWS responses and return them as exceptions defined in this package.

    :param code: the 'ResponseCode' value of the response message
    :param text: the 'MessageText' value of the response message
    :param msg_xml: the 'MessageXml' element of the response message, or None
    :return: an exception instance (returned, not raised; the caller decides whether to raise it)
    """
    if not code:
        return TransportError(f'Empty ResponseCode in ResponseMessage (MessageText: {text}, MessageXml: {msg_xml})')
    if msg_xml is not None:
        # If this is an ErrorInvalidPropertyRequest error, the xml may contain a specific FieldURI
        for elem_cls in (FieldURI, IndexedFieldURI, ExtendedFieldURI, ExceptionFieldURI):
            elem = msg_xml.find(elem_cls.response_tag())
            if elem is not None:
                field_uri = elem_cls.from_xml(elem, account=None)
                text += f' (field: {field_uri})'
                break
        # If this is an ErrorInvalidValueForProperty error, the xml may contain the name and value of the property
        if code == 'ErrorInvalidValueForProperty':
            msg_parts = {}
            for elem in msg_xml.findall(f'{{{TNS}}}Value'):
                key, val = elem.get('Name'), elem.text
                if key:
                    msg_parts[key] = val
            if msg_parts:
                text += f" ({', '.join(f'{k}: {v}' for k, v in msg_parts.items())})"
        # If this is an ErrorInternalServerError error, the xml may contain a more specific error code
        inner_code, inner_text = None, None
        for value_elem in msg_xml.findall(f'{{{TNS}}}Value'):
            name = value_elem.get('Name')
            if name == 'InnerErrorResponseCode':
                inner_code = value_elem.text
            elif name == 'InnerErrorMessageText':
                inner_text = value_elem.text
        if inner_code:
            try:
                # Raise the error as the inner error code
                return vars(errors)[inner_code](f'{inner_text} (raised from: {code}({text!r}))')
            except KeyError:
                # Inner code is unknown to us. Just append to the original text
                text += f' (inner error: {inner_code}({inner_text!r}))'
    try:
        # Raise the error corresponding to the ResponseCode
        return vars(errors)[code](text)
    except KeyError:
        # Should not happen
        return TransportError(
            f'Unknown ResponseCode in ResponseMessage: {code} (MessageText: {text}, MessageXml: {msg_xml})'
        )
def _get_elements_in_response(self, response):
    """Yield the elements we care about from a list of 'SomeServiceResponseMessage' elements.

    For e.g. 'CreateItem', the input is a list of 'CreateItemResponseMessage' elements and the
    'Message' elements inside each 'Items' container are yielded:

    <m:CreateItemResponseMessage ResponseClass="Success">
      <m:ResponseCode>NoError</m:ResponseCode>
      <m:Items>
        <t:Message>
          <t:ItemId Id="AQApA=" ChangeKey="AQAAAB"/>
        </t:Message>
      </m:Items>
    </m:CreateItemResponseMessage>

    :param response: a list of 'SomeServiceResponseMessage' XML objects
    :return: a generator of items as returned by '_get_elements_in_container()
    """
    for message in response:
        result = self._get_element_container(message=message, name=self.element_container_name)
        if isinstance(result, (bool, Exception)):
            # A status-only result, or an exception instance the caller accepts unraised
            yield result
        else:
            yield from self._get_elements_in_container(container=result)
@classmethod
def _get_elements_in_container(cls, container):
    """Return the child elements of an XML response container. With e.g. 'CreateItem', 'Items'
    is the container and its 'Message' children are returned:

    <m:Items>
      <t:Message>
        <t:ItemId Id="AQApA=" ChangeKey="AQAAAB"/>
      </t:Message>
    </m:Items>

    Services that return no response elements get a [True] status marker instead. Errors have
    already been raised at this point.
    """
    if not cls.returns_elements:
        # Status-only service; there are no elements to collect
        return [True]
    return list(container)
class EWSAccountService(EWSService, metaclass=abc.ABCMeta):
    """Base class for services that act on items concerning a single Mailbox on the server."""

    NO_VALID_SERVER_VERSIONS = ErrorInvalidSchemaVersionForMailboxVersion

    # Marks services that need affinity to the backend server
    prefer_affinity = False

    def __init__(self, *args, **kwargs):
        # The service operates on a specific account; route requests via that account's protocol
        self.account = kwargs.pop('account')
        kwargs['protocol'] = self.account.protocol
        super().__init__(*args, **kwargs)

    @property
    def _version_hint(self):
        # Version guesses are tracked per-account here, not on the protocol config
        return self.account.version

    @_version_hint.setter
    def _version_hint(self, value):
        self.account.version = value

    def _handle_response_cookies(self, session):
        super()._handle_response_cookies(session=session)
        # See self._extra_headers() for documentation on affinity
        if not self.prefer_affinity:
            return
        backend_cookie = next(
            (c for c in session.cookies if c.name == 'X-BackEndOverrideCookie'), None
        )
        if backend_cookie is not None:
            self.account.affinity_cookie = backend_cookie.value

    def _extra_headers(self, session):
        extra = super()._extra_headers(session=session)
        # See
        # https://blogs.msdn.microsoft.com/webdav_101/2015/05/11/best-practices-ews-authentication-and-access-issues/
        extra['X-AnchorMailbox'] = self.account.primary_smtp_address
        # See
        # https://docs.microsoft.com/en-us/exchange/client-developer/exchange-web-services/how-to-maintain-affinity-between-group-of-subscriptions-and-mailbox-server
        if self.prefer_affinity:
            extra['X-PreferServerAffinity'] = 'True'
            cookie = self.account.affinity_cookie
            if cookie:
                extra['X-BackEndOverrideCookie'] = cookie
        return extra

    @property
    def _account_to_impersonate(self):
        # Only impersonation access needs an explicit identity in the SOAP header
        return self.account.identity if self.account.access_type == IMPERSONATION else None

    @property
    def _timezone(self):
        return self.account.default_timezone
class EWSPagingService(EWSAccountService):
    """Base class for services that page their results over multiple requests."""

    def __init__(self, *args, **kwargs):
        # Number of items to request per page, defaulting to the module-level PAGE_SIZE
        self.page_size = kwargs.pop('page_size', None) or PAGE_SIZE
        if not isinstance(self.page_size, int):
            raise InvalidTypeError('page_size', self.page_size, int)
        if self.page_size < 1:
            raise ValueError(f"'page_size' {self.page_size} must be a positive number")
        super().__init__(*args, **kwargs)

    def _paged_call(self, payload_func, max_items, folders, **kwargs):
        """Call a service that supports paging requests. Return a generator over all response items. Keeps track of
        all paging-related counters.

        :param payload_func: a reference to the payload builder to call for each page request
        :param max_items: stop after collecting this many items in total; falsy means no limit
        :param folders: the folders to page through. Paging state is tracked per folder.
        """
        # Per-folder paging state: items collected so far, and the next page offset for that folder
        paging_infos = {f: dict(item_count=0, next_offset=None) for f in folders}
        common_next_offset = kwargs['offset']
        total_item_count = 0
        while True:
            if not paging_infos:
                # Paging is done for all folders
                break
            log.debug('Getting page at offset %s (max_items %s)', common_next_offset, max_items)
            kwargs['offset'] = common_next_offset
            kwargs['folders'] = paging_infos.keys()  # Only request the paging of the remaining folders.
            pages = self._get_pages(payload_func, kwargs, len(paging_infos))
            for (page, next_offset), (f, paging_info) in zip(pages, list(paging_infos.items())):
                paging_info['next_offset'] = next_offset
                if isinstance(page, Exception):
                    # Assume this folder no longer works. Don't attempt to page it again.
                    log.debug('Exception occurred for folder %s. Removing.', f)
                    del paging_infos[f]
                    yield page
                    continue
                if page is not None:
                    # NOTE(review): 'total_item_count' is passed by value, so _get_elems_from_page() only sees
                    # the count as of page entry — confirm intended behavior near the 'max_items' boundary
                    for elem in self._get_elems_from_page(page, max_items, total_item_count):
                        paging_info['item_count'] += 1
                        total_item_count += 1
                        yield elem
                    if max_items and total_item_count >= max_items:
                        # No need to continue. Break out of inner loop
                        log.debug("'max_items' count reached (inner)")
                        break
                if not paging_info['next_offset']:
                    # Paging is done for this folder. Don't attempt to page it again.
                    log.debug('Paging has completed for folder %s. Removing.', f)
                    del paging_infos[f]
                    continue
                log.debug('Folder %s still has items', f)
                # Check sanity of paging offsets, but don't fail. When we are iterating huge collections that take a
                # long time to complete, the collection may change while we are iterating. This can affect the
                # 'next_offset' value and make it inconsistent with the number of already collected items.
                # We may have a mismatch if we stopped early due to reaching 'max_items'.
                if paging_info['next_offset'] != paging_info['item_count'] and (
                        not max_items or total_item_count < max_items
                ):
                    log.warning('Unexpected next offset: %s -> %s. Maybe the server-side collection has changed?',
                                paging_info['item_count'], paging_info['next_offset'])
            # Also break out of outer loop
            if max_items and total_item_count >= max_items:
                log.debug("'max_items' count reached (outer)")
                break
            common_next_offset = self._get_next_offset(paging_infos.values())
            if common_next_offset is None:
                # Paging is done for all folders
                break

    @staticmethod
    def _get_paging_values(elem):
        """Read paging information from the paging container element.

        :param elem: the paging container element carrying 'IndexedPagingOffset', 'TotalItemsInView' and
            'IncludesLastItemInRange' attributes
        :return: a (item_count, next_offset) tuple. 'next_offset' is None when paging has completed.
        """
        offset_attr = elem.get('IndexedPagingOffset')
        next_offset = None if offset_attr is None else int(offset_attr)
        item_count = int(elem.get('TotalItemsInView'))
        # NOTE(review): accepting '0' here in addition to 'true' looks odd — confirm against actual server responses
        is_last_page = elem.get('IncludesLastItemInRange').lower() in ('true', '0')
        log.debug('Got page with offset %s, item_count %s, last_page %s', next_offset, item_count, is_last_page)
        # Clean up contradictory paging values
        if next_offset is None and not is_last_page:
            log.debug("Not last page in range, but server didn't send a page offset. Assuming first page")
            next_offset = 1
        if next_offset is not None and is_last_page:
            if next_offset != item_count:
                log.debug("Last page in range, but we still got an offset. Assuming paging has completed")
                next_offset = None
        if not item_count and not is_last_page:
            log.debug("Not last page in range, but also no items left. Assuming paging has completed")
            next_offset = None
        if item_count and next_offset == 0:
            # NOTE(review): the log message below doesn't appear to match this condition — confirm intent
            log.debug("Non-zero offset, but also no items left. Assuming paging has completed")
            next_offset = None
        return item_count, next_offset

    def _get_page(self, message):
        """Get a single page from a request message, and return the container and next offset.

        :param message: a 'SomeServiceResponseMessage' XML element
        :return: a (container, next_offset) tuple. 'container' may also be an exception instance or None.
        """
        paging_elem = self._get_element_container(message=message, name=self.paging_container_name)
        if isinstance(paging_elem, Exception):
            return paging_elem, None
        item_count, next_offset = self._get_paging_values(paging_elem)
        if not item_count:
            # The page holds no items; signal that with a None container
            paging_elem = None
        return paging_elem, next_offset

    def _get_elems_from_page(self, elem, max_items, total_item_count):
        """Yield the elements of a single page, stopping when 'max_items' is reached.

        :param elem: the paging container element of one page
        :param max_items: the global item limit; falsy means no limit
        :param total_item_count: the number of items collected before this page was entered
        """
        container = elem.find(self.element_container_name)
        if container is None:
            raise MalformedResponseError(
                f'No {self.element_container_name} elements in ResponseMessage ({xml_to_str(elem)})'
            )
        for e in self._get_elements_in_container(container=container):
            # NOTE(review): 'total_item_count' is a local int and does not advance as items are yielded,
            # so this guard only fires when the limit was already reached at page entry — confirm intent
            if max_items and total_item_count >= max_items:
                # No need to continue. Break out of elements loop
                log.debug("'max_items' count reached (elements)")
                break
            yield e

    def _get_pages(self, payload_func, kwargs, expected_message_count):
        """Request a page, or a list of pages if multiple collections are pages in a single request. Return each
        page.

        :param payload_func: a reference to the payload builder
        :param kwargs: the arguments to build the payload from
        :param expected_message_count: the number of response messages the server must return
        """
        payload = payload_func(**kwargs)
        page_elems = list(self._get_elements(payload=payload))
        if len(page_elems) != expected_message_count:
            raise MalformedResponseError(
                f"Expected {expected_message_count} items in 'response', got {len(page_elems)}"
            )
        return page_elems

    @staticmethod
    def _get_next_offset(paging_infos):
        """Return the offset to request next, across all folders that still have pages left.

        :param paging_infos: the per-folder paging state dicts
        :return: the lowest pending offset, or None when paging has completed for all folders
        """
        next_offsets = {p['next_offset'] for p in paging_infos if p['next_offset'] is not None}
        if not next_offsets:
            # Paging is done for all messages
            return None
        # We cannot guarantee that all messages that have a next_offset also have the *same* next_offset. This is
        # because the collections that we are iterating may change while iterating. We'll do our best but we cannot
        # guarantee 100% consistency when large collections are simultaneously being changed on the server.
        #
        # It's not possible to supply a per-folder offset when iterating multiple folders, so we'll just have to
        # choose something that is most likely to work. Select the lowest of all the values to at least make sure
        # we don't miss any items, although we may then get duplicates ¯\_(ツ)_/¯
        if len(next_offsets) > 1:
            log.warning('Inconsistent next_offset values: %r. Using lowest value', next_offsets)
        return min(next_offsets)
def to_item_id(item, item_cls):
    """Coerce a tuple, dict or object to an 'item_cls' instance. Used to create [Parent][Item|Folder]Id instances
    from a variety of input.

    :param item: an ID class instance, a folder/item model, a str/tuple/list, or any object with
        'id' and 'changekey' attributes
    :param item_cls: the ID class to coerce to, e.g. ItemId or FolderId
    """
    if isinstance(item, (BaseItemId, AttachmentId)):
        # Allow any BaseItemId subclass to pass unaltered
        return item
    if isinstance(item, (BaseFolder, BaseItem)):
        try:
            return item.to_id()
        except ValueError:
            # The model could not produce an ID; pass it on unaltered
            return item
    if isinstance(item, (str, tuple, list)):
        # NOTE(review): for a str input, '*' unpacks individual characters into item_cls — confirm
        # that str inputs are intended to be handled this way
        return item_cls(*item)
    return item_cls(item.id, item.changekey)
def shape_element(tag, shape, additional_fields, version):
    """Build a FooShape element holding the BaseShape and any additional requested properties.

    :param tag: the name of the shape element, e.g. 'm:ItemShape'
    :param shape: the BaseShape value
    :param additional_fields: optional extra fields to request
    :param version: the server version, used when serializing field values
    """
    elem = create_element(tag)
    add_xml_child(elem, 't:BaseShape', shape)
    if additional_fields:
        props_elem = create_element('t:AdditionalProperties')
        expanded = chain.from_iterable(f.expand(version=version) for f in additional_fields)
        # 'path' is insufficient to consistently sort additional properties. For example, we have both
        # 'contacts:Companies' and 'task:Companies' with path 'companies'. Sort by both 'field_uri' and 'path'.
        # Extended properties do not have a 'field_uri' value.
        ordered = sorted(expanded, key=lambda f: (getattr(f.field, 'field_uri', ''), f.path))
        set_xml_value(props_elem, ordered, version=version)
        elem.append(props_elem)
    return elem
def _ids_element(items, item_cls, version, tag):
    """Build an element named 'tag' containing one serialized 'item_cls' ID per entry in 'items'."""
    ids_elem = create_element(tag)
    for entry in items:
        set_xml_value(ids_elem, to_item_id(entry, item_cls), version=version)
    return ids_elem
def folder_ids_element(folders, version, tag='m:FolderIds'):
    """Build an XML element containing one FolderId element per folder."""
    return _ids_element(folders, FolderId, version, tag)
def item_ids_element(items, version, tag='m:ItemIds'):
    """Build an XML element containing one ItemId element per item."""
    return _ids_element(items, ItemId, version, tag)
def attachment_ids_element(items, version, tag='m:AttachmentIds'):
    """Build an XML element containing one AttachmentId element per item."""
    return _ids_element(items, AttachmentId, version, tag)
def parse_folder_elem(elem, folder, account):
    """Convert a folder XML element to a folder instance, using the most specific class we can infer.

    :param elem: the folder XML element
    :param folder: the folder model, folder ID or distinguished folder ID that was requested
    :param account: the account, used to supply the folder root when 'folder' doesn't know it
    """
    if isinstance(folder, RootOfHierarchy):
        parsed = folder.from_xml(elem=elem, account=folder.account)
    elif isinstance(folder, Folder):
        parsed = folder.from_xml_with_root(elem=elem, root=folder.root)
    elif isinstance(folder, DistinguishedFolderId):
        # We don't know the root, so assume account.root. Find the well-known class matching this ID.
        for candidate_cls in account.root.WELLKNOWN_FOLDERS:
            if candidate_cls.DISTINGUISHED_FOLDER_ID == folder.id:
                parsed = candidate_cls.from_xml_with_root(elem=elem, root=account.root)
                break
        else:
            raise ValueError(f'Unknown distinguished folder ID: {folder.id}')
    else:
        # 'folder' is a generic FolderId instance. We don't know the root so assume account.root.
        parsed = Folder.from_xml_with_root(elem=elem, root=account.root)
    if isinstance(folder, DistinguishedFolderId) or (isinstance(folder, BaseFolder) and folder.is_distinguished):
        # The caller knows this folder is distinguished; reflect that on the parsed instance
        parsed.is_distinguished = True
    return parsed
|
import abc
import logging
import traceback
from itertools import chain
from .. import errors
from ..attachments import AttachmentId
from ..credentials import IMPERSONATION, OAuth2Credentials
from ..errors import EWSWarning, TransportError, SOAPError, ErrorTimeoutExpired, ErrorBatchProcessingStopped, \
ErrorQuotaExceeded, ErrorCannotDeleteObject, ErrorCreateItemAccessDenied, ErrorFolderNotFound, \
ErrorNonExistentMailbox, ErrorMailboxStoreUnavailable, ErrorImpersonateUserDenied, ErrorInternalServerError, \
ErrorInternalServerTransientError, ErrorNoRespondingCASInDestinationSite, ErrorImpersonationFailed, \
ErrorMailboxMoveInProgress, ErrorAccessDenied, ErrorConnectionFailed, RateLimitError, ErrorServerBusy, \
ErrorTooManyObjectsOpened, ErrorInvalidLicense, ErrorInvalidSchemaVersionForMailboxVersion, \
ErrorInvalidServerVersion, ErrorItemNotFound, ErrorADUnavailable, ErrorInvalidChangeKey, \
ErrorItemSave, ErrorInvalidIdMalformed, ErrorMessageSizeExceeded, UnauthorizedError, \
ErrorCannotDeleteTaskOccurrence, ErrorMimeContentConversionFailed, ErrorRecurrenceHasNoOccurrence, \
ErrorNoPublicFolderReplicaAvailable, MalformedResponseError, ErrorExceededConnectionCount, \
SessionPoolMinSizeReached, ErrorIncorrectSchemaVersion, ErrorInvalidRequest, ErrorCorruptData, \
ErrorCannotEmptyFolder, ErrorDeleteDistinguishedFolder, ErrorInvalidSubscription, ErrorInvalidWatermark, \
ErrorInvalidSyncStateData, ErrorNameResolutionNoResults, ErrorNameResolutionMultipleResults, \
ErrorConnectionFailedTransientError, ErrorDelegateNoUser, ErrorNotDelegate, InvalidTypeError
from ..folders import BaseFolder, Folder, RootOfHierarchy
from ..items import BaseItem
from ..properties import FieldURI, IndexedFieldURI, ExtendedFieldURI, ExceptionFieldURI, ItemId, FolderId, \
DistinguishedFolderId, BaseItemId
from ..transport import wrap
from ..util import chunkify, create_element, add_xml_child, get_xml_attr, to_xml, post_ratelimited, \
xml_to_str, set_xml_value, SOAPNS, TNS, MNS, ENS, ParseError, DummyResponse
from ..version import API_VERSIONS, Version
log = logging.getLogger(__name__)

PAGE_SIZE = 100  # A default page size for all paging services. This is the number of items we request per page
CHUNK_SIZE = 100  # A default chunk size for all services. This is the number of items we send in a single request

# Exceptions that are well understood by this package. When caught during response processing, they are
# re-raised as-is without logging a traceback first (see EWSService._get_elements).
KNOWN_EXCEPTIONS = (
    ErrorAccessDenied,
    ErrorADUnavailable,
    ErrorBatchProcessingStopped,
    ErrorCannotDeleteObject,
    ErrorCannotEmptyFolder,
    ErrorConnectionFailed,
    ErrorConnectionFailedTransientError,
    ErrorCreateItemAccessDenied,
    ErrorDelegateNoUser,
    ErrorDeleteDistinguishedFolder,
    ErrorExceededConnectionCount,
    ErrorFolderNotFound,
    ErrorImpersonateUserDenied,
    ErrorImpersonationFailed,
    ErrorInternalServerError,
    ErrorInternalServerTransientError,
    ErrorInvalidChangeKey,
    ErrorInvalidLicense,
    ErrorInvalidSubscription,
    ErrorInvalidSyncStateData,
    ErrorInvalidWatermark,
    ErrorItemNotFound,
    ErrorMailboxMoveInProgress,
    ErrorMailboxStoreUnavailable,
    ErrorNameResolutionMultipleResults,
    ErrorNameResolutionNoResults,
    ErrorNonExistentMailbox,
    ErrorNoPublicFolderReplicaAvailable,
    ErrorNoRespondingCASInDestinationSite,
    ErrorNotDelegate,
    ErrorQuotaExceeded,
    ErrorTimeoutExpired,
    RateLimitError,
    UnauthorizedError,
)
class EWSService(metaclass=abc.ABCMeta):
    """Base class for all EWS services."""

    SERVICE_NAME = None  # The name of the SOAP service
    element_container_name = None  # The name of the XML element wrapping the collection of returned items
    paging_container_name = None  # The name of the element that contains paging information and the paged results
    returns_elements = True  # If False, the service does not return response elements, just the ResponseCode status
    # Return exception instance instead of raising exceptions for the following errors when contained in an element
    ERRORS_TO_CATCH_IN_RESPONSE = (
        EWSWarning, ErrorCannotDeleteObject, ErrorInvalidChangeKey, ErrorItemNotFound, ErrorItemSave,
        ErrorInvalidIdMalformed, ErrorMessageSizeExceeded, ErrorCannotDeleteTaskOccurrence,
        ErrorMimeContentConversionFailed, ErrorRecurrenceHasNoOccurrence, ErrorCorruptData
    )
    # Similarly, define the warnings we want to return unraised
    WARNINGS_TO_CATCH_IN_RESPONSE = ErrorBatchProcessingStopped
    # Define the warnings we want to ignore, to let response processing proceed
    WARNINGS_TO_IGNORE_IN_RESPONSE = ()
    # The exception type to raise when all attempted API versions failed
    NO_VALID_SERVER_VERSIONS = ErrorInvalidServerVersion
    # Marks the version from which the service was introduced
    supported_from = None
    # Marks services that support paging of requested items
    supports_paging = False
def __init__(self, protocol, chunk_size=None, timeout=None):
    """:param protocol: the Protocol instance that requests are sent through
    :param chunk_size: the number of items to send per request; defaults to CHUNK_SIZE
    :param timeout: an HTTP timeout override for this service; defaults to the protocol timeout
    """
    self.chunk_size = chunk_size or CHUNK_SIZE
    if not isinstance(self.chunk_size, int):
        raise InvalidTypeError('chunk_size', chunk_size, int)
    if self.chunk_size < 1:
        raise ValueError(f"'chunk_size' {self.chunk_size} must be a positive number")
    if self.supported_from and protocol.version.build < self.supported_from:
        # The server speaks an older Exchange version than this service requires
        raise NotImplementedError(
            f'{self.SERVICE_NAME!r} is only supported on {self.supported_from.fullname()!r} and later. '
            f'Your current version is {protocol.version.build.fullname()!r}.'
        )
    self.protocol = protocol
    # Allow a service to override the default protocol timeout. Useful for streaming services
    self.timeout = timeout
    # Controls whether the HTTP request should be streaming or fetch everything at once
    self.streaming = False
    # Streaming connection variables
    self._streaming_session = None
    self._streaming_response = None
# The following two methods are the minimum required to be implemented by subclasses, but the name and number of
# kwargs differs between services. Therefore, we cannot make these methods abstract.
# @abc.abstractmethod
# def call(self, **kwargs):
# """Defines the arguments required by the service. Arguments are basic Python types or EWSElement objects.
# Returns either XML objects or EWSElement objects.
# """
# pass
# @abc.abstractmethod
# def get_payload(self, **kwargs):
# """Using the arguments from .call(), return the payload expected by the service, as an XML object. The XML
# object should consist of a SERVICE_NAME element and everything within that.
# """
# pass
def get(self, expect_result=True, **kwargs):
    """Call the service, but require a specific result count: exactly one when expect_result=True,
    zero when expect_result=False, and zero or one when expect_result=None.

    :param expect_result: None, True, or False
    :param kwargs: Same as arguments for .call()
    :return: the single result object, or None
    """
    results = list(self.call(**kwargs))
    # Surface any exception objects contained in the result
    for item in results:
        if isinstance(item, Exception):
            raise item
    if expect_result is None and not results:
        # Empty result is acceptable in this mode
        return None
    if expect_result is False:
        if results:
            raise ValueError(f'Expected result length 0, but got {results}')
        return None
    if len(results) != 1:
        raise ValueError(f'Expected result length 1, but got {results}')
    return results[0]
def parse(self, xml):
    """Parse static XML data as if it were a service response. Used mostly for testing.

    :param xml: the raw response content to parse
    :return: the parsed objects, as produced by ._elems_to_objs()
    """
    resp = DummyResponse(content=xml, streaming=self.streaming)
    _, body = self._get_soap_parts(response=resp)
    return self._elems_to_objs(self._get_elements_in_response(response=self._get_soap_messages(body=body)))
def _elems_to_objs(self, elems):
    """Convert a stream of XML elements (or exceptions) into the equivalent Python objects (or exceptions)."""
    for elem in elems:
        # Exceptions and None pass through untouched. Some services don't return an ID if the target
        # folder is outside the mailbox, hence None is allowed here.
        if elem is None or isinstance(elem, Exception):
            yield elem
        else:
            yield self._elem_to_obj(elem)
def _elem_to_obj(self, elem):
    """Convert a single XML element to its Python object equivalent. Services that return elements
    must override this.

    :raises RuntimeError: when called on a service that does not return elements
    :raises NotImplementedError: when the subclass has not provided an implementation
    """
    if not self.returns_elements:
        raise RuntimeError("Incorrect call to method when 'returns_elements' is False")
    raise NotImplementedError()
@property
def _version_hint(self):
    """Return the best guess at the server version currently known."""
    # We may be here due to version guessing in Protocol.version, so we can't use the self.protocol.version property
    return self.protocol.config.version
@_version_hint.setter
def _version_hint(self, value):
    # Propagate the detected version back to the protocol configuration
    self.protocol.config.version = value
def _extra_headers(self, session):
    """Return extra HTTP headers to send with the request. Subclasses may add headers here
    (e.g. EWSAccountService adds anchor-mailbox and affinity headers)."""
    return {}
@property
def _account_to_impersonate(self):
    """Return the identity to impersonate in the SOAP header, or None."""
    if isinstance(self.protocol.credentials, OAuth2Credentials):
        return self.protocol.credentials.identity
    return None
@property
def _timezone(self):
    # No timezone context at the protocol level. Subclasses with account access override this.
    return None
def _response_generator(self, payload):
    """Send the payload to the server, and return the response.

    :param payload: payload as an XML object
    :return: the response, as XML objects
    """
    response = self._get_response_xml(payload=payload)
    if self.supports_paging:
        # Paging services consume responses page-by-page, with paging information attached
        return (self._get_page(message) for message in response)
    return self._get_elements_in_response(response=response)
def _chunked_get_elements(self, payload_func, items, **kwargs):
    """Split 'items' into chunks of at most self.chunk_size, issue one request per chunk, and yield
    all response elements.

    :param payload_func: A reference to .payload()
    :param items: An iterable of items (messages, folders, etc.) to process
    :param kwargs: Same as arguments for .call(), except for the 'items' argument
    :return: Same as ._get_elements()
    """
    # If the input for a service is a QuerySet, it can be difficult to remove exceptions before now
    usable_items = (i for i in items if not isinstance(i, Exception))
    for chunk_no, chunk in enumerate(chunkify(usable_items, self.chunk_size), start=1):
        log.debug('Processing chunk %s containing %s items', chunk_no, len(chunk))
        yield from self._get_elements(payload=payload_func(chunk, **kwargs))
def stop_streaming(self):
    """Close the streaming response and hand the streaming session back to the pool."""
    if not self.streaming:
        raise RuntimeError('Attempt to stop a non-streaming service')
    response = self._streaming_response
    if response:
        response.close()  # Release memory
        self._streaming_response = None
    session = self._streaming_session
    if session:
        self.protocol.release_session(session)
        self._streaming_session = None
def _get_elements(self, payload):
    """Send the payload to be sent and parsed. Handles and re-raise exceptions that are not meant to be returned
    to the caller as exception objects. Retry the request according to the retry policy.

    :param payload: the request payload, as an XML object
    :return: a generator of response elements (or acceptable exception instances)
    """
    while True:
        try:
            # Create a generator over the response elements so exceptions in response elements are also raised
            # here and can be handled.
            yield from self._response_generator(payload=payload)
            return
        except ErrorServerBusy as e:
            # The server asked us to back off. Apply the retry policy, then retry the request.
            self._handle_backoff(e)
            continue
        except KNOWN_EXCEPTIONS:
            # These are known and understood, and don't require a backtrace.
            raise
        except (ErrorTooManyObjectsOpened, ErrorTimeoutExpired) as e:
            # ErrorTooManyObjectsOpened means there are too many connections to the Exchange database. This is very
            # often a symptom of sending too many requests.
            #
            # ErrorTimeoutExpired can be caused by a busy server, or by overly large requests. Start by lowering the
            # session count. This is done by downstream code.
            if isinstance(e, ErrorTimeoutExpired) and self.protocol.session_pool_size <= 1:
                # We're already as low as we can go, so downstream cannot limit the session count to put less load
                # on the server. We don't have a way of lowering the page size of requests from
                # this part of the code yet. Let the user handle this.
                raise e
            # Re-raise as an ErrorServerBusy with a default delay of 5 minutes
            raise ErrorServerBusy(f'Reraised from {e.__class__.__name__}({e})')
        except Exception:
            # This may run in a thread, which obfuscates the stack trace. Print trace immediately.
            account = self.account if isinstance(self, EWSAccountService) else None
            log.warning('Account %s: Exception in _get_elements: %s', account, traceback.format_exc(20))
            raise
        finally:
            if self.streaming:
                self.stop_streaming()
def _handle_response_cookies(self, session):
    """Hook called after a response is received. Subclasses may pick up cookies from the session
    (e.g. EWSAccountService collects the affinity cookie). Default is a no-op."""
    pass
def _get_response(self, payload, api_version):
    """Send the actual HTTP request and get the response.

    :param payload: the request payload, as an XML object
    :param api_version: the API version string to request in the SOAP header
    :return: the HTTP response object
    """
    session = self.protocol.get_session()
    if self.streaming:
        # Make sure to clean up lingering resources
        self.stop_streaming()
    r, session = post_ratelimited(
        protocol=self.protocol,
        session=session,
        url=self.protocol.service_endpoint,
        headers=self._extra_headers(session),
        data=wrap(
            content=payload,
            api_version=api_version,
            account_to_impersonate=self._account_to_impersonate,
            timezone=self._timezone,
        ),
        allow_redirects=False,
        stream=self.streaming,
        timeout=self.timeout or self.protocol.TIMEOUT,
    )
    self._handle_response_cookies(session)
    if self.streaming:
        # We can only release the session when we have fully consumed the response. Save session and response
        # objects for later.
        self._streaming_session, self._streaming_response = session, r
    else:
        self.protocol.release_session(session)
    return r
@property
def _api_versions_to_try(self):
    """Return all candidate API version strings, with the current best guess first."""
    # Put the hint first in the list, and then all other versions except the hint, from newest to oldest
    return (self._version_hint.api_version,) + tuple(v for v in API_VERSIONS if v != self._version_hint.api_version)
def _get_response_xml(self, payload, **parse_opts):
    """Send the payload to the server and return relevant elements from the result. Several things happen here:
      * The payload is wrapped in SOAP headers and sent to the server
      * The Exchange API version is negotiated and stored in the protocol object
      * Connection errors are handled and possibly reraised as ErrorServerBusy
      * SOAP errors are raised
      * EWS errors are raised, or passed on to the caller

    :param payload: The request payload, as an XML object
    :param parse_opts: extra keyword arguments passed on to the SOAP parsing helpers
    :return: A generator of XML objects or None if the service does not return a result
    """
    # Microsoft really doesn't want to make our lives easy. The server may report one version in our initial version
    # guessing tango, but then the server may decide that any arbitrary legacy backend server may actually process
    # the request for an account. Prepare to handle version-related errors and set the server version per-account.
    log.debug('Calling service %s', self.SERVICE_NAME)
    for api_version in self._api_versions_to_try:
        log.debug('Trying API version %s', api_version)
        r = self._get_response(payload=payload, api_version=api_version)
        if self.streaming:
            # Let 'requests' decode raw data automatically
            r.raw.decode_content = True
        try:
            header, body = self._get_soap_parts(response=r, **parse_opts)
        except Exception:
            r.close()  # Release memory
            raise
        # The body may contain error messages from Exchange, but we still want to collect version info
        if header is not None:
            self._update_api_version(api_version=api_version, header=header, **parse_opts)
        try:
            return self._get_soap_messages(body=body, **parse_opts)
        except (ErrorInvalidServerVersion, ErrorIncorrectSchemaVersion, ErrorInvalidRequest,
                ErrorInvalidSchemaVersionForMailboxVersion):
            # The guessed server version is wrong. Try the next version
            log.debug('API version %s was invalid', api_version)
            continue
        except ErrorExceededConnectionCount as e:
            # This indicates that the connecting user has too many open TCP connections to the server. Decrease
            # our session pool size.
            try:
                self.protocol.decrease_poolsize()
                continue
            except SessionPoolMinSizeReached:
                # We're already as low as we can go. Let the user handle this.
                raise e
        finally:
            if not self.streaming:
                # In streaming mode, we may not have accessed the raw stream yet. Caller must handle this.
                r.close()  # Release memory
    raise self.NO_VALID_SERVER_VERSIONS(f'Tried versions {self._api_versions_to_try} but all were invalid')
def _handle_backoff(self, e):
"""Take a request from the server to back off and checks the retry policy for what to do. Re-raise the
exception if conditions are not met.
:param e: An ErrorServerBusy instance
:return:
"""
log.debug('Got ErrorServerBusy (back off %s seconds)', e.back_off)
# ErrorServerBusy is very often a symptom of sending too many requests. Scale back connections if possible.
try:
self.protocol.decrease_poolsize()
except SessionPoolMinSizeReached:
pass
if self.protocol.retry_policy.fail_fast:
raise e
self.protocol.retry_policy.back_off(e.back_off)
# We'll warn about this later if we actually need to sleep
def _update_api_version(self, api_version, header, **parse_opts):
"""Parse the server version contained in SOAP headers and update the version hint stored by the caller, if
necessary.
"""
try:
head_version = Version.from_soap_header(requested_api_version=api_version, header=header)
except TransportError as te:
log.debug('Failed to update version info (%s)', te)
return
if self._version_hint == head_version:
# Nothing to do
return
log.debug('Found new version (%s -> %s)', self._version_hint, head_version)
# The api_version that worked was different than our hint, or we never got a build version. Store the working
# version.
self._version_hint = head_version
@classmethod
def _response_tag(cls):
"""Return the name of the element containing the service response."""
return f'{{{MNS}}}{cls.SERVICE_NAME}Response'
@staticmethod
def _response_messages_tag():
"""Return the name of the element containing service response messages."""
return f'{{{MNS}}}ResponseMessages'
@classmethod
def _response_message_tag(cls):
"""Return the name of the element of a single response message."""
return f'{{{MNS}}}{cls.SERVICE_NAME}ResponseMessage'
@classmethod
def _get_soap_parts(cls, response, **parse_opts):
"""Split the SOAP response into its headers an body elements."""
try:
root = to_xml(response.iter_content())
except ParseError as e:
raise SOAPError(f'Bad SOAP response: {e}')
header = root.find(f'{{{SOAPNS}}}Header')
if header is None:
# This is normal when the response contains SOAP-level errors
log.debug('No header in XML response')
body = root.find(f'{{{SOAPNS}}}Body')
if body is None:
raise MalformedResponseError('No Body element in SOAP response')
return header, body
def _get_soap_messages(self, body, **parse_opts):
"""Return the elements in the response containing the response messages. Raises any SOAP exceptions."""
response = body.find(self._response_tag())
if response is None:
fault = body.find(f'{{{SOAPNS}}}Fault')
if fault is None:
raise SOAPError(f'Unknown SOAP response (expected {self._response_tag()} or Fault): {xml_to_str(body)}')
self._raise_soap_errors(fault=fault) # Will throw SOAPError or custom EWS error
response_messages = response.find(self._response_messages_tag())
if response_messages is None:
# Result isn't delivered in a list of FooResponseMessages, but directly in the FooResponse. Consumers expect
# a list, so return a list
return [response]
return response_messages.findall(self._response_message_tag())
    @classmethod
    def _raise_soap_errors(cls, fault):
        """Parse error messages contained in SOAP headers and raise as exceptions defined in this package.

        :param fault: the SOAP 'Fault' XML element
        :raises SOAPError: when the fault cannot be mapped to a more specific exception class
        """
        # Fault: See http://www.w3.org/TR/2000/NOTE-SOAP-20000508/#_Toc478383507
        fault_code = get_xml_attr(fault, 'faultcode')
        fault_string = get_xml_attr(fault, 'faultstring')
        fault_actor = get_xml_attr(fault, 'faultactor')
        detail = fault.find('detail')
        if detail is not None:
            code, msg = None, ''
            if detail.find(f'{{{ENS}}}ResponseCode') is not None:
                code = get_xml_attr(detail, f'{{{ENS}}}ResponseCode').strip()
            if detail.find(f'{{{ENS}}}Message') is not None:
                msg = get_xml_attr(detail, f'{{{ENS}}}Message').strip()
            msg_xml = detail.find(f'{{{TNS}}}MessageXml')  # Crazy. Here, it's in the TNS namespace
            if code == 'ErrorServerBusy':
                back_off = None
                try:
                    # The suggested back-off delay, if any, is delivered as a named Value element
                    value = msg_xml.find(f'{{{TNS}}}Value')
                    if value.get('Name') == 'BackOffMilliseconds':
                        back_off = int(value.text) / 1000.0  # Convert to seconds
                except (TypeError, AttributeError):
                    # 'msg_xml' or the 'Value' element may be missing. Fall back to no suggested delay
                    pass
                raise ErrorServerBusy(msg, back_off=back_off)
            if code == 'ErrorSchemaValidation' and msg_xml is not None:
                # Enrich the message with the violation text and XML line/position info, when available
                line_number = get_xml_attr(msg_xml, f'{{{TNS}}}LineNumber')
                line_position = get_xml_attr(msg_xml, f'{{{TNS}}}LinePosition')
                violation = get_xml_attr(msg_xml, f'{{{TNS}}}Violation')
                if violation:
                    msg = f'{msg} {violation}'
                if line_number or line_position:
                    msg = f'{msg} (line: {line_number} position: {line_position})'
            try:
                # Raise the exception class matching the EWS response code, if we know it
                raise vars(errors)[code](msg)
            except KeyError:
                # Unknown response code. Build a detail string for the generic SOAPError raised below
                detail = f'{cls.SERVICE_NAME}: code: {code} msg: {msg} ({xml_to_str(detail)})'
        try:
            # No usable 'detail' element. Try mapping the SOAP fault code to a known exception instead
            raise vars(errors)[fault_code](fault_string)
        except KeyError:
            pass
        raise SOAPError(f'SOAP error code: {fault_code} string: {fault_string} actor: {fault_actor} detail: {detail}')
    def _get_element_container(self, message, name=None):
        """Return the XML element in a response element that contains the elements we want the service to return. For
        example, in a GetFolder response, 'message' is the GetFolderResponseMessage element, and we return the 'Folders'
        element:

        <m:GetFolderResponseMessage ResponseClass="Success">
          <m:ResponseCode>NoError</m:ResponseCode>
          <m:Folders>
            <t:Folder>
              <t:FolderId Id="AQApA=" ChangeKey="AQAAAB" />
              [...]
            </t:Folder>
          </m:Folders>
        </m:GetFolderResponseMessage>

        Some service responses don't have a containing element for the returned elements ('name' is None). In
        that case, we return the 'SomeServiceResponseMessage' element.

        If the response contains a warning or an error message, we raise the relevant exception, unless the error class
        is contained in WARNINGS_TO_CATCH_IN_RESPONSE or ERRORS_TO_CATCH_IN_RESPONSE, in which case we return the
        exception instance.

        :param message: a 'SomeServiceResponseMessage' XML element
        :param name: the tag of the container element, or None when the response has no container element
        :return: the container element ('message' itself when 'name' is None), True for a no-error response
            without a container, or an exception instance for caught warnings/errors
        """
        # ResponseClass is an XML attribute of various SomeServiceResponseMessage elements: Possible values are:
        # Success, Warning, Error. See e.g.
        # https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/finditemresponsemessage
        response_class = message.get('ResponseClass')
        # ResponseCode, MessageText: See
        # https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/responsecode
        response_code = get_xml_attr(message, f'{{{MNS}}}ResponseCode')
        if response_class == 'Success' and response_code == 'NoError':
            if not name:
                return message
            container = message.find(name)
            if container is None:
                raise MalformedResponseError(f'No {name} elements in ResponseMessage ({xml_to_str(message)})')
            return container
        if response_code == 'NoError':
            # Not a Success response class, but also no error code. Report plain success status
            return True
        # Raise any non-acceptable errors in the container, or return the container or the acceptable exception instance
        msg_text = get_xml_attr(message, f'{{{MNS}}}MessageText')
        msg_xml = message.find(f'{{{MNS}}}MessageXml')
        if response_class == 'Warning':
            try:
                raise self._get_exception(code=response_code, text=msg_text, msg_xml=msg_xml)
            except self.WARNINGS_TO_CATCH_IN_RESPONSE as e:
                # Acceptable warning; hand the exception instance back to the caller instead of raising
                return e
            except self.WARNINGS_TO_IGNORE_IN_RESPONSE as e:
                # Ignorable warning; log it and return the container as if the response were a success
                log.warning(str(e))
                container = message.find(name)
                if container is None:
                    raise MalformedResponseError(f'No {name} elements in ResponseMessage ({xml_to_str(message)})')
                return container
        # rspclass == 'Error', or 'Success' and not 'NoError'
        try:
            raise self._get_exception(code=response_code, text=msg_text, msg_xml=msg_xml)
        except self.ERRORS_TO_CATCH_IN_RESPONSE as e:
            # Acceptable error; hand the exception instance back to the caller instead of raising
            return e
    @staticmethod
    def _get_exception(code, text, msg_xml):
        """Parse error messages contained in EWS responses and raise as exceptions defined in this package.

        Note: the exception instance is *returned* rather than raised; the caller decides whether to raise it.

        :param code: the EWS ResponseCode string, possibly empty
        :param text: the MessageText of the response message, if any
        :param msg_xml: the MessageXml element of the response message, or None
        :return: an exception instance matching 'code', or a TransportError for empty/unknown codes
        """
        if not code:
            return TransportError(f'Empty ResponseCode in ResponseMessage (MessageText: {text}, MessageXml: {msg_xml})')
        if msg_xml is not None:
            # If this is an ErrorInvalidPropertyRequest error, the xml may contain a specific FieldURI
            for elem_cls in (FieldURI, IndexedFieldURI, ExtendedFieldURI, ExceptionFieldURI):
                elem = msg_xml.find(elem_cls.response_tag())
                if elem is not None:
                    field_uri = elem_cls.from_xml(elem, account=None)
                    text += f' (field: {field_uri})'
                    break
            # If this is an ErrorInvalidValueForProperty error, the xml may contain the name and value of the property
            if code == 'ErrorInvalidValueForProperty':
                msg_parts = {}
                for elem in msg_xml.findall(f'{{{TNS}}}Value'):
                    key, val = elem.get('Name'), elem.text
                    if key:
                        msg_parts[key] = val
                if msg_parts:
                    text += f" ({', '.join(f'{k}: {v}' for k, v in msg_parts.items())})"
            # If this is an ErrorInternalServerError error, the xml may contain a more specific error code
            inner_code, inner_text = None, None
            for value_elem in msg_xml.findall(f'{{{TNS}}}Value'):
                name = value_elem.get('Name')
                if name == 'InnerErrorResponseCode':
                    inner_code = value_elem.text
                elif name == 'InnerErrorMessageText':
                    inner_text = value_elem.text
            if inner_code:
                try:
                    # Raise the error as the inner error code
                    return vars(errors)[inner_code](f'{inner_text} (raised from: {code}({text!r}))')
                except KeyError:
                    # Inner code is unknown to us. Just append to the original text
                    text += f' (inner error: {inner_code}({inner_text!r}))'
        try:
            # Raise the error corresponding to the ResponseCode
            return vars(errors)[code](text)
        except KeyError:
            # Should not happen
            return TransportError(
                f'Unknown ResponseCode in ResponseMessage: {code} (MessageText: {text}, MessageXml: {msg_xml})'
            )
def _get_elements_in_response(self, response):
"""Take a list of 'SomeServiceResponseMessage' elements and return the elements in each response message that
we want the service to return. With e.g. 'CreateItem', we get a list of 'CreateItemResponseMessage' elements
and return the 'Message' elements.
<m:CreateItemResponseMessage ResponseClass="Success">
<m:ResponseCode>NoError</m:ResponseCode>
<m:Items>
<t:Message>
<t:ItemId Id="AQApA=" ChangeKey="AQAAAB"/>
</t:Message>
</m:Items>
</m:CreateItemResponseMessage>
<m:CreateItemResponseMessage ResponseClass="Success">
<m:ResponseCode>NoError</m:ResponseCode>
<m:Items>
<t:Message>
<t:ItemId Id="AQApB=" ChangeKey="AQAAAC"/>
</t:Message>
</m:Items>
</m:CreateItemResponseMessage>
:param response: a list of 'SomeServiceResponseMessage' XML objects
:return: a generator of items as returned by '_get_elements_in_container()
"""
for msg in response:
container_or_exc = self._get_element_container(message=msg, name=self.element_container_name)
if isinstance(container_or_exc, (bool, Exception)):
yield container_or_exc
else:
for c in self._get_elements_in_container(container=container_or_exc):
yield c
@classmethod
def _get_elements_in_container(cls, container):
"""Return a list of response elements from an XML response element container. With e.g.
'CreateItem', 'Items' is the container element and we return the 'Message' child elements:
<m:Items>
<t:Message>
<t:ItemId Id="AQApA=" ChangeKey="AQAAAB"/>
</t:Message>
</m:Items>
If the service does not return response elements, return True to indicate the status. Errors have already been
raised.
"""
if cls.returns_elements:
return list(container)
return [True]
class EWSAccountService(EWSService, metaclass=abc.ABCMeta):
    """Base class for services that operate on items belonging to a single Mailbox on the server."""

    NO_VALID_SERVER_VERSIONS = ErrorInvalidSchemaVersionForMailboxVersion

    # Marks services that need affinity to the backend server
    prefer_affinity = False

    def __init__(self, *args, **kwargs):
        # The service is bound to a single account; the protocol is always taken from that account
        self.account = kwargs.pop('account')
        kwargs['protocol'] = self.account.protocol
        super().__init__(*args, **kwargs)

    @property
    def _version_hint(self):
        # Account-bound services keep the version hint on the account itself
        return self.account.version

    @_version_hint.setter
    def _version_hint(self, value):
        self.account.version = value

    def _handle_response_cookies(self, session):
        super()._handle_response_cookies(session=session)
        # See self._extra_headers() for documentation on affinity
        if not self.prefer_affinity:
            return
        for cookie in session.cookies:
            if cookie.name == 'X-BackEndOverrideCookie':
                self.account.affinity_cookie = cookie.value
                break

    def _extra_headers(self, session):
        headers = super()._extra_headers(session=session)
        # See
        # https://blogs.msdn.microsoft.com/webdav_101/2015/05/11/best-practices-ews-authentication-and-access-issues/
        headers['X-AnchorMailbox'] = self.account.primary_smtp_address
        # See
        # https://docs.microsoft.com/en-us/exchange/client-developer/exchange-web-services/how-to-maintain-affinity-between-group-of-subscriptions-and-mailbox-server
        if self.prefer_affinity:
            headers['X-PreferServerAffinity'] = 'True'
            if self.account.affinity_cookie:
                headers['X-BackEndOverrideCookie'] = self.account.affinity_cookie
        return headers

    @property
    def _account_to_impersonate(self):
        # Only impersonation access sends the account identity along with the request
        return self.account.identity if self.account.access_type == IMPERSONATION else None

    @property
    def _timezone(self):
        return self.account.default_timezone
class EWSPagingService(EWSAccountService):
    """Base class for account services that page through large result sets using server-side offsets."""

    def __init__(self, *args, **kwargs):
        # 'page_size' is the number of items requested per page. Must be a positive integer
        self.page_size = kwargs.pop('page_size', None) or PAGE_SIZE
        if not isinstance(self.page_size, int):
            raise InvalidTypeError('page_size', self.page_size, int)
        if self.page_size < 1:
            raise ValueError(f"'page_size' {self.page_size} must be a positive number")
        super().__init__(*args, **kwargs)

    def _paged_call(self, payload_func, max_items, folders, **kwargs):
        """Call a service that supports paging requests. Return a generator over all response items. Keeps track of
        all paging-related counters.

        :param payload_func: reference to the service's payload builder
        :param max_items: maximum total number of items to return, or a falsy value for no limit
        :param folders: the folders to page through; each folder is paged independently
        """
        paging_infos = {f: dict(item_count=0, next_offset=None) for f in folders}
        common_next_offset = kwargs['offset']
        total_item_count = 0
        while True:
            if not paging_infos:
                # Paging is done for all folders
                break
            log.debug('Getting page at offset %s (max_items %s)', common_next_offset, max_items)
            kwargs['offset'] = common_next_offset
            kwargs['folders'] = paging_infos.keys()  # Only request the paging of the remaining folders.
            pages = self._get_pages(payload_func, kwargs, len(paging_infos))
            # Iterate a copy of the items since we may delete entries from 'paging_infos' while looping
            for (page, next_offset), (f, paging_info) in zip(pages, list(paging_infos.items())):
                paging_info['next_offset'] = next_offset
                if isinstance(page, Exception):
                    # Assume this folder no longer works. Don't attempt to page it again.
                    log.debug('Exception occurred for folder %s. Removing.', f)
                    del paging_infos[f]
                    yield page
                    continue
                if page is not None:
                    for elem in self._get_elems_from_page(page, max_items, total_item_count):
                        paging_info['item_count'] += 1
                        total_item_count += 1
                        yield elem
                    if max_items and total_item_count >= max_items:
                        # No need to continue. Break out of inner loop
                        log.debug("'max_items' count reached (inner)")
                        break
                if not paging_info['next_offset']:
                    # Paging is done for this folder. Don't attempt to page it again.
                    log.debug('Paging has completed for folder %s. Removing.', f)
                    del paging_infos[f]
                    continue
                log.debug('Folder %s still has items', f)
                # Check sanity of paging offsets, but don't fail. When we are iterating huge collections that take a
                # long time to complete, the collection may change while we are iterating. This can affect the
                # 'next_offset' value and make it inconsistent with the number of already collected items.
                # We may have a mismatch if we stopped early due to reaching 'max_items'.
                if paging_info['next_offset'] != paging_info['item_count'] and (
                        not max_items or total_item_count < max_items
                ):
                    log.warning('Unexpected next offset: %s -> %s. Maybe the server-side collection has changed?',
                                paging_info['item_count'], paging_info['next_offset'])
            # Also break out of outer loop
            if max_items and total_item_count >= max_items:
                log.debug("'max_items' count reached (outer)")
                break
            common_next_offset = self._get_next_offset(paging_infos.values())
            if common_next_offset is None:
                # Paging is done for all folders
                break

    @staticmethod
    def _get_paging_values(elem):
        """Read paging information from the paging container element.

        :param elem: the paging container XML element
        :return: an (item_count, next_offset) tuple; 'next_offset' is None when paging has completed
        """
        offset_attr = elem.get('IndexedPagingOffset')
        next_offset = None if offset_attr is None else int(offset_attr)
        item_count = int(elem.get('TotalItemsInView'))
        # XML boolean true values are 'true' and '1'. The previous code tested for ('true', '0'), which
        # treated the *false* lexical form '0' as true and would end paging prematurely whenever the
        # server sends numeric booleans.
        is_last_page = elem.get('IncludesLastItemInRange').lower() in ('true', '1')
        log.debug('Got page with offset %s, item_count %s, last_page %s', next_offset, item_count, is_last_page)
        # Clean up contradictory paging values
        if next_offset is None and not is_last_page:
            log.debug("Not last page in range, but server didn't send a page offset. Assuming first page")
            next_offset = 1
        if next_offset is not None and is_last_page:
            if next_offset != item_count:
                log.debug("Last page in range, but we still got an offset. Assuming paging has completed")
            # The server says this was the last page, so paging is done regardless of the offset value.
            # (Previously this was only cleared when the offset disagreed with the item count, which could
            # make the caller request a page past the end of the collection.)
            next_offset = None
        if not item_count and not is_last_page:
            log.debug("Not last page in range, but also no items left. Assuming paging has completed")
            next_offset = None
        if item_count and next_offset == 0:
            # NOTE(review): the log text does not match the condition (non-zero item count, zero offset);
            # text kept as-is to preserve log output.
            log.debug("Non-zero offset, but also no items left. Assuming paging has completed")
            next_offset = None
        return item_count, next_offset

    def _get_page(self, message):
        """Get a single page from a request message, and return the container and next offset."""
        paging_elem = self._get_element_container(message=message, name=self.paging_container_name)
        if isinstance(paging_elem, Exception):
            # Propagate the caught exception instance; there is no offset to report
            return paging_elem, None
        item_count, next_offset = self._get_paging_values(paging_elem)
        if not item_count:
            # An empty page; signal it with None instead of an empty container
            paging_elem = None
        return paging_elem, next_offset

    def _get_elems_from_page(self, elem, max_items, total_item_count):
        """Yield the response elements of a single page, stopping early when 'max_items' is reached."""
        container = elem.find(self.element_container_name)
        if container is None:
            raise MalformedResponseError(
                f'No {self.element_container_name} elements in ResponseMessage ({xml_to_str(elem)})'
            )
        for e in self._get_elements_in_container(container=container):
            if max_items and total_item_count >= max_items:
                # No need to continue. Break out of elements loop
                log.debug("'max_items' count reached (elements)")
                break
            yield e

    def _get_pages(self, payload_func, kwargs, expected_message_count):
        """Request a page, or a list of pages if multiple collections are paged in a single request. Return each
        page.
        """
        payload = payload_func(**kwargs)
        page_elems = list(self._get_elements(payload=payload))
        if len(page_elems) != expected_message_count:
            raise MalformedResponseError(
                f"Expected {expected_message_count} items in 'response', got {len(page_elems)}"
            )
        return page_elems

    @staticmethod
    def _get_next_offset(paging_infos):
        """Return the offset for the next round of paging requests, or None when paging has completed."""
        next_offsets = {p['next_offset'] for p in paging_infos if p['next_offset'] is not None}
        if not next_offsets:
            # Paging is done for all messages
            return None
        # We cannot guarantee that all messages that have a next_offset also have the *same* next_offset. This is
        # because the collections that we are iterating may change while iterating. We'll do our best but we cannot
        # guarantee 100% consistency when large collections are simultaneously being changed on the server.
        #
        # It's not possible to supply a per-folder offset when iterating multiple folders, so we'll just have to
        # choose something that is most likely to work. Select the lowest of all the values to at least make sure
        # we don't miss any items, although we may then get duplicates ¯\_(ツ)_/¯
        if len(next_offsets) > 1:
            log.warning('Inconsistent next_offset values: %r. Using lowest value', next_offsets)
        return min(next_offsets)
def to_item_id(item, item_cls):
    """Coerce a string, tuple, list or object into an 'item_cls' instance. Used to create
    [Parent][Item|Folder]Id instances from a variety of input.
    """
    if isinstance(item, (BaseItemId, AttachmentId)):
        # Already an ID object; any BaseItemId subclass passes through unaltered
        return item
    if isinstance(item, (BaseFolder, BaseItem)):
        # Full folder/item objects know how to convert themselves; fall back to the object itself
        try:
            return item.to_id()
        except ValueError:
            return item
    if isinstance(item, (str, tuple, list)):
        # Treat the input as positional constructor arguments
        return item_cls(*item)
    # Assume an object exposing 'id' and 'changekey' attributes
    return item_cls(item.id, item.changekey)
def shape_element(tag, shape, additional_fields, version):
    """Build a shape XML element named 'tag' with a BaseShape child and, when extra fields are
    requested, an AdditionalProperties child holding the expanded fields in deterministic order.
    """
    elem = create_element(tag)
    add_xml_child(elem, 't:BaseShape', shape)
    if additional_fields:
        expanded = chain.from_iterable(f.expand(version=version) for f in additional_fields)
        # 'path' alone is not enough to sort additional properties consistently. For example, both
        # 'contacts:Companies' and 'task:Companies' have path 'companies'. Sort on ('field_uri', 'path');
        # extended properties have no 'field_uri' value, so default to ''.
        ordered = sorted(expanded, key=lambda f: (getattr(f.field, 'field_uri', ''), f.path))
        props = create_element('t:AdditionalProperties')
        set_xml_value(props, ordered, version=version)
        elem.append(props)
    return elem
def _ids_element(items, item_cls, version, tag):
    """Build a 'tag' element containing one 'item_cls' ID child per input item."""
    parent = create_element(tag)
    for candidate in items:
        set_xml_value(parent, to_item_id(candidate, item_cls), version=version)
    return parent
def folder_ids_element(folders, version, tag='m:FolderIds'):
    """Build a 'tag' element (default 'm:FolderIds') from a collection of folders or folder IDs."""
    return _ids_element(items=folders, item_cls=FolderId, version=version, tag=tag)
def item_ids_element(items, version, tag='m:ItemIds'):
    """Build a 'tag' element (default 'm:ItemIds') from a collection of items or item IDs."""
    return _ids_element(items=items, item_cls=ItemId, version=version, tag=tag)
def attachment_ids_element(items, version, tag='m:AttachmentIds'):
    """Build a 'tag' element (default 'm:AttachmentIds') from a collection of attachments or attachment IDs."""
    return _ids_element(items=items, item_cls=AttachmentId, version=version, tag=tag)
def parse_folder_elem(elem, folder, account):
    """Convert a folder XML element into a folder model instance.

    :param elem: the folder XML element returned by the service
    :param folder: the folder object or [Distinguished]FolderId that was requested
    :param account: the account the folder belongs to; used to resolve the folder hierarchy root
    :return: a folder instance of the most specific class we can determine

    Note: the isinstance dispatch order matters; RootOfHierarchy must be tested before Folder.
    """
    if isinstance(folder, RootOfHierarchy):
        f = folder.from_xml(elem=elem, account=folder.account)
    elif isinstance(folder, Folder):
        f = folder.from_xml_with_root(elem=elem, root=folder.root)
    elif isinstance(folder, DistinguishedFolderId):
        # We don't know the root, so assume account.root.
        for cls in account.root.WELLKNOWN_FOLDERS:
            if cls.DISTINGUISHED_FOLDER_ID == folder.id:
                folder_cls = cls
                break
        else:
            # for-else: no known folder class matched the distinguished folder ID
            raise ValueError(f'Unknown distinguished folder ID: {folder.id}')
        f = folder_cls.from_xml_with_root(elem=elem, root=account.root)
    else:
        # 'folder' is a generic FolderId instance. We don't know the root so assume account.root.
        f = Folder.from_xml_with_root(elem=elem, root=account.root)
    if isinstance(folder, DistinguishedFolderId):
        # The folder was requested via a distinguished ID, so mark the result accordingly
        f.is_distinguished = True
    elif isinstance(folder, BaseFolder) and folder.is_distinguished:
        # Propagate the distinguished flag from the requested folder object
        f.is_distinguished = True
    return f
|
en
| 0.828608
|
# A default page size for all paging services. This is the number of items we request per page # A default chunk size for all services. This is the number of items we send in a single request Base class for all EWS services. # The name of the SOAP service # The name of the XML element wrapping the collection of returned items # The name of the element that contains paging information and the paged results # If False, the service does not return response elements, just the ResponseCode status # Return exception instance instead of raising exceptions for the following errors when contained in an element # Similarly, define the warnings we want to return unraised # Define the warnings we want to ignore, to let response processing proceed # The exception type to raise when all attempted API versions failed # Marks the version from which the service was introduced # Marks services that support paging of requested items # Allow a service to override the default protocol timeout. Useful for streaming services # Controls whether the HTTP request should be streaming or fetch everything at once # Streaming connection variables # The following two methods are the minimum required to be implemented by subclasses, but the name and number of # kwargs differs between services. Therefore, we cannot make these methods abstract. # @abc.abstractmethod # def call(self, **kwargs): # """Defines the arguments required by the service. Arguments are basic Python types or EWSElement objects. # Returns either XML objects or EWSElement objects. # """" # pass # @abc.abstractmethod # def get_payload(self, **kwargs): # """Using the arguments from .call(), return the payload expected by the service, as an XML object. The XML # object should consist of a SERVICE_NAME element and everything within that. # """ # pass Like .call(), but expects exactly one result from the server, or zero when expect_result=False, or either zero or one when expect_result=None. Returns either one object or None. 
:param expect_result: None, True, or False :param kwargs: Same as arguments for .call() :return: Same as .call(), but returns either None or exactly one item # Raise any errors # Allow empty result Used mostly for testing, when we want to parse static XML data. Takes a generator of XML elements and exceptions. Returns the equivalent Python objects (or exceptions). # Allow None here. Some services don't return an ID if the target folder is outside the mailbox. # We may be here due to version guessing in Protocol.version, so we can't use the self.protocol.version property Send the payload to the server, and return the response. :param payload: payload as an XML object :return: the response, as XML objects Yield elements in a response. Like ._get_elements(), but chop items into suitable chunks and send multiple requests. :param payload_func: A reference to .payload() :param items: An iterable of items (messages, folders, etc.) to process :param kwargs: Same as arguments for .call(), except for the 'items' argument :return: Same as ._get_elements() # If the input for a service is a QuerySet, it can be difficult to remove exceptions before now # Release memory Send the payload to be sent and parsed. Handles and re-raise exceptions that are not meant to be returned to the caller as exception objects. Retry the request according to the retry policy. # Create a generator over the response elements so exceptions in response elements are also raised # here and can be handled. # These are known and understood, and don't require a backtrace. # ErrorTooManyObjectsOpened means there are too many connections to the Exchange database. This is very # often a symptom of sending too many requests. # # ErrorTimeoutExpired can be caused by a busy server, or by overly large requests. Start by lowering the # session count. This is done by downstream code. # We're already as low as we can go, so downstream cannot limit the session count to put less load # on the server. 
We don't have a way of lowering the page size of requests from # this part of the code yet. Let the user handle this. # Re-raise as an ErrorServerBusy with a default delay of 5 minutes # This may run in a thread, which obfuscates the stack trace. Print trace immediately. Send the actual HTTP request and get the response. # Make sure to clean up lingering resources # We con only release the session when we have fully consumed the response. Save session and response # objects for later. # Put the hint first in the list, and then all other versions except the hint, from newest to oldest Send the payload to the server and return relevant elements from the result. Several things happen here: * The payload is wrapped in SOAP headers and sent to the server * The Exchange API version is negotiated and stored in the protocol object * Connection errors are handled and possibly reraised as ErrorServerBusy * SOAP errors are raised * EWS errors are raised, or passed on to the caller :param payload: The request payload, as an XML object :return: A generator of XML objects or None if the service does not return a result # Microsoft really doesn't want to make our lives easy. The server may report one version in our initial version # guessing tango, but then the server may decide that any arbitrary legacy backend server may actually process # the request for an account. Prepare to handle version-related errors and set the server version per-account. # Let 'requests' decode raw data automatically # Release memory # The body may contain error messages from Exchange, but we still want to collect version info # The guessed server version is wrong. Try the next version # This indicates that the connecting user has too many open TCP connections to the server. Decrease # our session pool size. # We're already as low as we can go. Let the user handle this. # In streaming mode, we may not have accessed the raw stream yet. Caller must handle this. 
# Release memory Take a request from the server to back off and checks the retry policy for what to do. Re-raise the exception if conditions are not met. :param e: An ErrorServerBusy instance :return: # ErrorServerBusy is very often a symptom of sending too many requests. Scale back connections if possible. # We'll warn about this later if we actually need to sleep Parse the server version contained in SOAP headers and update the version hint stored by the caller, if necessary. # Nothing to do # The api_version that worked was different than our hint, or we never got a build version. Store the working # version. Return the name of the element containing the service response. Return the name of the element containing service response messages. Return the name of the element of a single response message. Split the SOAP response into its headers an body elements. # This is normal when the response contains SOAP-level errors Return the elements in the response containing the response messages. Raises any SOAP exceptions. # Will throw SOAPError or custom EWS error # Result isn't delivered in a list of FooResponseMessages, but directly in the FooResponse. Consumers expect # a list, so return a list Parse error messages contained in SOAP headers and raise as exceptions defined in this package. # Fault: See http://www.w3.org/TR/2000/NOTE-SOAP-20000508/#_Toc478383507 # Crazy. Here, it's in the TNS namespace # Convert to seconds Return the XML element in a response element that contains the elements we want the service to return. For example, in a GetFolder response, 'message' is the GetFolderResponseMessage element, and we return the 'Folders' element: <m:GetFolderResponseMessage ResponseClass="Success"> <m:ResponseCode>NoError</m:ResponseCode> <m:Folders> <t:Folder> <t:FolderId Id="AQApA=" ChangeKey="AQAAAB" /> [...] </t:Folder> </m:Folders> </m:GetFolderResponseMessage> Some service responses don't have a containing element for the returned elements ('name' is None). 
In that case, we return the 'SomeServiceResponseMessage' element. If the response contains a warning or an error message, we raise the relevant exception, unless the error class is contained in WARNINGS_TO_CATCH_IN_RESPONSE or ERRORS_TO_CATCH_IN_RESPONSE, in which case we return the exception instance. # ResponseClass is an XML attribute of various SomeServiceResponseMessage elements: Possible values are: # Success, Warning, Error. See e.g. # https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/finditemresponsemessage # ResponseCode, MessageText: See # https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/responsecode # Raise any non-acceptable errors in the container, or return the container or the acceptable exception instance # rspclass == 'Error', or 'Success' and not 'NoError' Parse error messages contained in EWS responses and raise as exceptions defined in this package. # If this is an ErrorInvalidPropertyRequest error, the xml may contain a specific FieldURI # If this is an ErrorInvalidValueForProperty error, the xml may contain the name and value of the property # If this is an ErrorInternalServerError error, the xml may contain a more specific error code # Raise the error as the inner error code # Inner code is unknown to us. Just append to the original text # Raise the error corresponding to the ResponseCode # Should not happen Take a list of 'SomeServiceResponseMessage' elements and return the elements in each response message that we want the service to return. With e.g. 'CreateItem', we get a list of 'CreateItemResponseMessage' elements and return the 'Message' elements. 
<m:CreateItemResponseMessage ResponseClass="Success"> <m:ResponseCode>NoError</m:ResponseCode> <m:Items> <t:Message> <t:ItemId Id="AQApA=" ChangeKey="AQAAAB"/> </t:Message> </m:Items> </m:CreateItemResponseMessage> <m:CreateItemResponseMessage ResponseClass="Success"> <m:ResponseCode>NoError</m:ResponseCode> <m:Items> <t:Message> <t:ItemId Id="AQApB=" ChangeKey="AQAAAC"/> </t:Message> </m:Items> </m:CreateItemResponseMessage> :param response: a list of 'SomeServiceResponseMessage' XML objects :return: a generator of items as returned by '_get_elements_in_container() Return a list of response elements from an XML response element container. With e.g. 'CreateItem', 'Items' is the container element and we return the 'Message' child elements: <m:Items> <t:Message> <t:ItemId Id="AQApA=" ChangeKey="AQAAAB"/> </t:Message> </m:Items> If the service does not return response elements, return True to indicate the status. Errors have already been raised. Base class for services that act on items concerning a single Mailbox on the server. # Marks services that need affinity to the backend server # See self._extra_headers() for documentation on affinity # See # https://blogs.msdn.microsoft.com/webdav_101/2015/05/11/best-practices-ews-authentication-and-access-issues/ # See # https://docs.microsoft.com/en-us/exchange/client-developer/exchange-web-services/how-to-maintain-affinity-between-group-of-subscriptions-and-mailbox-server Call a service that supports paging requests. Return a generator over all response items. Keeps track of all paging-related counters. # Paging is done for all folders # Only request the paging of the remaining folders. # Assume this folder no longer works. Don't attempt to page it again. # No need to continue. Break out of inner loop # Paging is done for this folder. Don't attempt to page it again. # Check sanity of paging offsets, but don't fail. 
When we are iterating huge collections that take a # long time to complete, the collection may change while we are iterating. This can affect the # 'next_offset' value and make it inconsistent with the number of already collected items. # We may have a mismatch if we stopped early due to reaching 'max_items'. # Also break out of outer loop # Paging is done for all folders Read paging information from the paging container element. # Clean up contradictory paging values Get a single page from a request message, and return the container and next offset. # No need to continue. Break out of elements loop Request a page, or a list of pages if multiple collections are pages in a single request. Return each page. # Paging is done for all messages # We cannot guarantee that all messages that have a next_offset also have the *same* next_offset. This is # because the collections that we are iterating may change while iterating. We'll do our best but we cannot # guarantee 100% consistency when large collections are simultaneously being changed on the server. # # It's not possible to supply a per-folder offset when iterating multiple folders, so we'll just have to # choose something that is most likely to work. Select the lowest of all the values to at least make sure # we don't miss any items, although we may then get duplicates ¯\_(ツ)_/¯ # Coerce a tuple, dict or object to an 'item_cls' instance. Used to create [Parent][Item|Folder]Id instances from a # variety of input. # Allow any BaseItemId subclass to pass unaltered # 'path' is insufficient to consistently sort additional properties. For example, we have both # 'contacts:Companies' and 'task:Companies' with path 'companies'. Sort by both 'field_uri' and 'path'. # Extended properties do not have a 'field_uri' value. # We don't know the root, so assume account.root. # 'folder' is a generic FolderId instance. We don't know the root so assume account.root.
| 1.474452
| 1
|
MISC/fmprep.py
|
varenius/oso
| 0
|
6625608
|
#!/usr/bin/env python3
import datetime
import glob
import os
import re
import socket
import sys
import time
from subprocess import PIPE, run

import matplotlib.dates as mdates
import numpy as np
def filltracks(of):
    """Write the $TRACKS section (VDIF track frame format) to the open file *of*."""
    for row in ("$TRACKS;",
                " def OTT;",
                " track_frame_format = VDIF;",
                " enddef;"):
        of.write(row + "\n")
def fillmode(of):
of.write("$MODE;\n")
of.write(" def VGEO-X8.XX;\n")
of.write(" ref $FREQ = OTT:Oe:Ow;\n")
of.write(" ref $BBC = OTT:Oe:Ow;\n")
of.write(" ref $IF = OTT:Oe:Ow;\n")
of.write(" ref $TRACKS = OTT:Oe:Ow;\n")
of.write(" ref $PHASE_CAL_DETECT = OTT:Oe:Ow;\n")
of.write(" enddef;\n")
def fillbbc(of):
    """Write the $BBC section to *of*: BBCs 01-64 mapped one-to-one to channel
    numbers, assigned in groups of 8 alternating between IF 1N and IF 3N
    (X and Y polarization respectively, per the $IF section).

    Replaces 64 hand-written, error-prone chan lines with a generated loop;
    output is identical to the original listing.
    """
    of.write("$BBC;\n")
    of.write(" def OTT;\n")
    for i in range(1, 65):
        # Groups of 8 channels alternate IF: 1-8 -> 1N, 9-16 -> 3N, 17-24 -> 1N, ...
        ifref = "&IF_1N" if ((i - 1) // 8) % 2 == 0 else "&IF_3N"
        of.write(" BBC_assign = &BBC{0:02d} : {0:02d} : {1};\n".format(i, ifref))
    of.write(" enddef;\n")
def fillfreq(of):
    """Write the $FREQ section to *of*: 64 channels of 32 MHz LSB at 64 Ms/s.

    The channel plan is four frequency bands (around 3.2, 5.5, 6.6, 10.5 GHz),
    each band's 8 sky frequencies written twice in a row (channels 1-8 repeat
    as 9-16, etc. -- matching the 1N/3N IF alternation in the $BBC section).
    Replaces 64 hand-written chan_def lines with a generated loop; output is
    identical to the original listing.
    """
    bands = (
        (3480.40, 3448.40, 3384.40, 3320.40, 3224.40, 3096.40, 3064.40, 3032.40),
        (5720.40, 5688.40, 5624.40, 5560.40, 5464.40, 5336.40, 5304.40, 5272.40),
        (6840.40, 6808.40, 6744.40, 6680.40, 6584.40, 6456.40, 6424.40, 6392.40),
        (10680.40, 10648.40, 10584.40, 10520.40, 10424.40, 10296.40, 10264.40, 10232.40),
    )
    of.write("$FREQ;\n")
    of.write(" def OTT;\n")
    ch = 0
    for band in bands:
        # Each band appears twice: once per IF/polarization group of 8 channels.
        for _ in range(2):
            for freq in band:
                ch += 1
                of.write(" chan_def = &X : {0:.2f} MHz : L : 32.000 MHz : "
                         "&Ch{1:02d} : &BBC{1:02d} : &L_cal;\n".format(freq, ch))
    of.write(" sample_rate = 64.0 Ms/sec;\n")
    of.write(" enddef;\n")
def fillif(of):
    """Write the $IF section: two IFs (1N = X pol, 3N = Y pol), both with an
    8080.0 MHz LO, upper sideband, 5 MHz phase-cal spacing."""
    of.write("$IF;\n")
    of.write(" def OTT;\n")
    for label, pol in (("1N", "X"), ("3N", "Y")):
        of.write(" if_def = &IF_{0} : {0} : {1} : 8080.0 MHz : U : 5 MHz : 0 Hz;\n".format(label, pol))
    of.write(" enddef;\n")
def getskd(exp):
    """Fetch the sked schedule file <exp>.skd from host 'fulla' into the cwd."""
    remote = "fulla:/usr2/sched/{0}.skd".format(exp)
    run(["scp", remote, "."])
def getfslogs(exp):
    """Fetch the field-system logs for both stations into the cwd:
    <exp>oe.log from fulla and <exp>ow.log from freja."""
    for host, station in (("fulla", "oe"), ("freja", "ow")):
        run(["scp", "{0}:/usr2/log/{1}{2}.log".format(host, exp, station), "."])
def fillclock(of, fslog):
    """Fit a linear clock model to GPS-vs-formatter offsets in FS log *fslog*
    and append a $CLOCK 'def' line for the station to *of*.

    The two-letter station code is taken from the log file name
    (e.g. '...oe.log' -> 'Oe'); that station's peculiar offset is added to
    the fitted clock value. Values are written in the 'clock early' sign
    convention.

    Fix vs. original: the '!dbe_gps_offset?' branch used re.split() without
    're' being imported anywhere (NameError on DBBC3-style logs) -- 're' is
    now in the top-level imports. The parse-loop variable is also renamed
    from 'time' to 'ts' to stop shadowing the imported 'time' module.
    """
    # Peculiar (instrumental) offsets in microseconds, with provenance notes.
    peculiaroff = {"Ow": [6.183, "from https://github.com/whi-llc/adjust/blob/files/data/bb_po_v1.1.dat"],
                   "Oe": [6.211, "from https://github.com/whi-llc/adjust/blob/files/data/bb_po_v1.1.dat"],
                   "On": [1.350, "from multiple exp from Bonn"],
                   "Is": [1.268, "from https://github.com/whi-llc/adjust/blob/files/data/bb_po_v1.1.dat"],
                   "Yj": [-0.108, "from https://github.com/whi-llc/adjust/blob/files/data/bb_po_v1.1.dat"],
                   "O8": [5.18, "From Bob C, EVN: O8 = +5.78, On = +1.95 --> O8 = 5.78-(1.95-1.350)=5.18"]
                   }
    vals = []
    times = []
    for l in open(fslog):
        if ("/gps-fmout/" in l) or ("/gps-maser/" in l) or ("/gps-dbbcout2/" in l):
            ls = l.split("/")
            ts = datetime.datetime.strptime(ls[0], "%Y.%j.%H:%M:%S.%f")
            val = -float(ls[2])  # negative, as fmout-gps is the "clock early" convention
            vals.append(val)  # Seconds
            times.append(ts)
        elif ("/fmout-gps/" in l):
            ls = l.split("/")
            ts = datetime.datetime.strptime(ls[0], "%Y.%j.%H:%M:%S.%f")
            val = float(ls[2])  # pos, as fmout-gps is the "clock early" convention
            vals.append(val)  # Seconds
            times.append(ts)
        elif ("!dbe_gps_offset?" in l):
            # DBBC3-style query reply; split on '/', '?0:' or ';'.
            ls = re.split(r"/|[?]0:|;", l.strip())
            ts = datetime.datetime.strptime(ls[0], "%Y.%j.%H:%M:%S.%f")
            val = float(ls[3])
            vals.append(val)  # Seconds
            times.append(ts)
    vals = np.array(vals)
    times = np.array(times)
    # Filter outliers more than 10 sigma from the mean.
    avg = np.average(vals)
    std = np.std(vals)
    diff = np.abs(vals - avg)
    cut = 10 * std
    # Filtering should really be done first fitting once without filters, removing
    # linear trend, then filter outliers. But this works well enough for Onsala
    # big jumps as is.
    bad = np.where(diff > cut)
    vals = np.delete(vals, bad)
    times = np.delete(times, bad)
    x = mdates.date2num(times)  # decimal days
    pf = np.polyfit(x, vals, 1)  # linear model: pf[0] is slope in s/day
    p = np.poly1d(pf)
    xx = np.linspace(x.min(), x.max(), 100)
    dd = mdates.num2date(xx)
    fn = os.path.basename(fslog)
    # Station code from the log name, e.g. '...oe.log' -> 'Oe'.
    station = fn[-6:-5].upper() + fn[-5:-4].lower()
    # Make valid range 30 min before first ref point, just in case first point
    # is after first scan.
    valtime = (dd[0] + datetime.timedelta(minutes=-30)).strftime("%Yy%jd%Hh%Mm%Ss")
    reftime = (dd[0]).strftime("%Yy%jd%Hh%Mm%Ss")  # Integer seconds; we don't need more precision
    # Get fitted clock, add peculiar offset
    pecoff = peculiaroff[station]
    refclock = p(xx.min()) + pecoff[0] * 1e-6
    rate = pf[0] / (24 * 3600)  # convert from s/day to s/s
    of.write("*" + station + ": Clock without peculiar offset: {0} us\n".format(p(xx.min()) * 1e6))
    of.write("* valid from clock_early clock_early_epoch rate\n")
    of.write("def {:s}; clock_early = {:s} : {:.3f} usec : {:s} : {:.3f}e-12; enddef;\n".format(station, valtime, refclock * 1e6, reftime, rate * 1e12))
def fillEOP(of, start):
    """Fetch Earth-orientation parameters and copy them into *of*.

    Runs geteop.pl for 5 days starting at *start* (a 'YYYY-DOY' string), which
    is expected to leave its result in ./EOP.txt; that file is then appended
    line by line to *of*.

    Fix vs. original: EOP.txt is now opened via a context manager so the file
    handle is closed deterministically.
    """
    # NOTE(review): the geteop.pl exit status is ignored -- if the fetch fails,
    # a stale EOP.txt would be copied silently; confirm this best-effort
    # behavior is intended.
    os.system('EMAIL_ADDR=<EMAIL> geteop.pl ' + start + ' 5')
    with open("EOP.txt") as eop:
        for el in eop:
            of.write(el)
def skd2vex(exp):
    """Convert <exp>.skd to <exp>.vex by driving the interactive sked program."""
    outvex = "{0}.vex".format(exp)
    if os.path.exists(outvex):
        os.remove(outvex)
    # Convert SKD to VEX: feed sked the 'VEC <file>' command, then 'q' to quit.
    commands = "VEC " + outvex + "\rq\r"
    run(["/opt/sked/sked", exp + ".skd"], stdout=PIPE, input=commands, encoding="ascii")
def _write_datastream(vf, name):
    """Write one v2d DATASTREAM block (VDIF format + filelist) for stream *name*."""
    vf.write("DATASTREAM {0}\n".format(name))
    vf.write("{\n")
    vf.write(" format = VDIF/8032/2\n")
    vf.write(" filelist = {0}.files\n".format(name))
    vf.write("}\n")

def _write_antenna(vf, station, names):
    """Write one v2d ANTENNA block for *station*, referencing datastreams *names*."""
    vf.write("ANTENNA {0}\n".format(station))
    vf.write("{\n")
    vf.write(" datastreams = {0}\n".format(", ".join(names)))
    vf.write(" sampling = REAL\n")
    vf.write(" toneSelection = all\n")
    vf.write(" phaseCalInt = 5\n")
    vf.write(" }\n")

def makev2d(exp):
    """Write the DiFX control file <exp>.v2d for the Oe/Ow twin telescopes.

    Declares eight VDIF datastreams per station (oe0-oe7, ow0-ow7, each with
    its own .files list) and the two ANTENNA blocks referencing them.

    Fixes vs. original: the output file is closed deterministically (with
    statement), and the 16 copy-pasted DATASTREAM blocks are generated by
    helpers -- the file content is unchanged.
    """
    oe_streams = ["oe{0}".format(i) for i in range(8)]
    ow_streams = ["ow{0}".format(i) for i in range(8)]
    with open(exp + ".v2d", "w") as vf:
        vf.write("vex = {0}.vex\n".format(exp))
        vf.write("antennas = Oe, Ow\n")
        vf.write("nCore=12\n")
        vf.write("nThread=1\n")
        vf.write("# Ensure we get cross-auto corrs, just in case (i.e. Oe X-pol correlated with Oe Y-pol)\n")
        vf.write("exhaustiveAutocorrs = true\n")
        vf.write("SETUP default\n")
        vf.write("{\n")
        vf.write(" tInt=1\n")
        vf.write(" # High res to be able to notch-filter RFI on Oe-Ow baseline\n")
        vf.write(" fftSpecRes=0.1 \n")
        vf.write(" specRes=0.1\n")
        vf.write("}\n")
        for name in oe_streams:
            _write_datastream(vf, name)
        vf.write("\n")
        for name in ow_streams:
            _write_datastream(vf, name)
        vf.write("\n")
        _write_antenna(vf, "Ow", ow_streams)
        vf.write("\n")
        _write_antenna(vf, "Oe", oe_streams)
def makemachines(ncores=24):
    """Create a <base>.machines file for every *.input job in the cwd.

    Each file lists this host once per core for the MPI launcher. *ncores*
    generalizes the previously hard-coded 24 (default unchanged).

    Fixes vs. original: file closed via 'with'; hostname looked up once
    instead of once per line.
    """
    host = socket.gethostname()
    for inp in glob.glob("*.input"):
        base = inp.split(".")[0]
        with open(base + ".machines", "w") as mf:
            mf.write((host + "\n") * ncores)
def mountfiles(exp):
    """(Re)mount the recorded data for *exp*: vbs_fs on this host and on
    gyller, then an sshfs view of gyller's mount, pausing 1 s between
    commands. Each command is echoed before it runs."""
    local_umount = "fusermount -u /mnt/fmdata"
    local_mount = "vbs_fs /mnt/fmdata -I '{0}*'".format(exp)
    commands = [
        local_umount,
        local_mount,
        "ssh oper@gyller " + local_umount,
        "ssh oper@gyller " + local_mount,
        "fusermount -u /mnt/gyller-fmdata",
        "sshfs oper@10.100.0.15:/mnt/fmdata /mnt/gyller-fmdata",
    ]
    for c in commands:
        print("Running command " + c)
        os.system(c)
        time.sleep(1)
def listfiles(exp):
    """Index the recorded VDIF data with vsum, producing the per-stream
    <station><n>.files lists referenced by the v2d DATASTREAM blocks
    (ow from the local mount, oe from the gyller sshfs mount)."""
    print("NOTE: Will index all data with vsum. May take 10 minutes or so...")
    for station, root in (("ow", "/mnt/fmdata"), ("oe", "/mnt/gyller-fmdata")):
        for mf in range(8):
            cmd = "vsum -s {0}/{1}_{2}*_{3} > {2}{3}.files".format(root, exp, station, mf)
            print("Running command " + cmd)
            os.system(cmd)
    print("...done indexing data!")
## SCRIPT STARTS HERE
# Prepare an Onsala twin-telescope (Oe/Ow) experiment for DiFX correlation:
# fetch logs and schedule, rebuild the VEX file with the hard-coded OTT
# sections plus fitted $CLOCK and EOP data, then mount and index the data.
exp = sys.argv[1]  # experiment name, e.g. 'vo1234'
ans = input("Will run preparation actions for experiment " + exp + ". NOTE: This may unmount file-systems causing loss of paths and I/O limits. So, NEVER DO THIS WHEN RECORDING OR CORRELATING unless you know what you are doing! Type 'yes' to proceed:")
if not ans.lower()=="yes":
    print("Did not get yes, aborting")
    sys.exit(1)
getfslogs(exp)
getskd(exp)
skd2vex(exp)
# Read all lines of VEX file
vex = [l for l in open(exp+".vex")]
keep = True
# Rewrite the VEX file in place: while 'keep' is False we are inside a
# sked-generated section that is being replaced by one of the fill* helpers.
of = open(exp+".vex","w")
start = ""
for line in vex:
    if "begin $MODE" in line:
        keep=False
        fillmode(of)
    if "end $MODE" in line:
        keep=True
    if "begin $BBC" in line:
        keep=False
        fillbbc(of)
    if "end $BBC" in line:
        keep=True
    if "begin $FREQ" in line:
        keep=False
        fillfreq(of)
    if "end $FREQ" in line:
        keep=True
    if "begin $IF" in line:
        keep=False
        fillif(of)
    if "end $IF" in line:
        keep=True
    if "begin $TRACKS" in line:
        keep=False
        filltracks(of)
    if "end $TRACKS" in line:
        # After $TRACKS, append the fitted $CLOCK section (both stations)
        # and the EOP data.
        # NOTE(review): 'keep' is never reset to True here, so any VEX content
        # after the $TRACKS marker is discarded -- presumably $TRACKS is the
        # last section sked emits before this point; confirm against sked output.
        of.write(line)
        of.write("$CLOCK;\n")
        fillclock(of, exp+"oe.log")
        fillclock(of, exp+"ow.log")
        fillEOP(of, start)
    if keep:
        of.write(line)
    if "start = " in line and start=="":
        # Remember the first scan's start date minus 2 days (YYYY-DOY) as the
        # epoch handed to fillEOP.
        # NOTE(review): assumes a 'start = ' line appears before 'end $TRACKS';
        # otherwise fillEOP is called with start="" -- confirm section order.
        year = line.split()[2][0:4]
        doy = str(int(line.split()[2][5:8])-2)
        start = year+"-"+doy
of.close()
makev2d(exp)
mountfiles(exp)
listfiles(exp)
os.system("vex2difx -v -v -v -d "+exp+".v2d")
# Ensure that the CalcServer is running: will restart if already exists
os.system("startCalcServer")
os.system("calcif2 *.calc")
makemachines()
print("SCRIPT FINISHED. Check the output. If all seems OK, start correlation (in a screen!) by running 'startdifx -n -f *.input -v'")
|
#!/usr/bin/env python3
import sys, os, glob
import datetime, time
import numpy as np
import matplotlib.dates as mdates
from subprocess import run, PIPE
import socket
def filltracks(of):
    """Write the $TRACKS section (VDIF track frame format) to the open file *of*."""
    for row in ("$TRACKS;",
                " def OTT;",
                " track_frame_format = VDIF;",
                " enddef;"):
        of.write(row + "\n")
def fillmode(of):
of.write("$MODE;\n")
of.write(" def VGEO-X8.XX;\n")
of.write(" ref $FREQ = OTT:Oe:Ow;\n")
of.write(" ref $BBC = OTT:Oe:Ow;\n")
of.write(" ref $IF = OTT:Oe:Ow;\n")
of.write(" ref $TRACKS = OTT:Oe:Ow;\n")
of.write(" ref $PHASE_CAL_DETECT = OTT:Oe:Ow;\n")
of.write(" enddef;\n")
def fillbbc(of):
    """Write the $BBC section to *of*: BBCs 01-64 mapped one-to-one to channel
    numbers, assigned in groups of 8 alternating between IF 1N and IF 3N
    (X and Y polarization respectively, per the $IF section).

    Replaces 64 hand-written, error-prone chan lines with a generated loop;
    output is identical to the original listing.
    """
    of.write("$BBC;\n")
    of.write(" def OTT;\n")
    for i in range(1, 65):
        # Groups of 8 channels alternate IF: 1-8 -> 1N, 9-16 -> 3N, 17-24 -> 1N, ...
        ifref = "&IF_1N" if ((i - 1) // 8) % 2 == 0 else "&IF_3N"
        of.write(" BBC_assign = &BBC{0:02d} : {0:02d} : {1};\n".format(i, ifref))
    of.write(" enddef;\n")
def fillfreq(of):
    """Write the $FREQ section to *of*: 64 channels of 32 MHz LSB at 64 Ms/s.

    The channel plan is four frequency bands (around 3.2, 5.5, 6.6, 10.5 GHz),
    each band's 8 sky frequencies written twice in a row (channels 1-8 repeat
    as 9-16, etc. -- matching the 1N/3N IF alternation in the $BBC section).
    Replaces 64 hand-written chan_def lines with a generated loop; output is
    identical to the original listing.
    """
    bands = (
        (3480.40, 3448.40, 3384.40, 3320.40, 3224.40, 3096.40, 3064.40, 3032.40),
        (5720.40, 5688.40, 5624.40, 5560.40, 5464.40, 5336.40, 5304.40, 5272.40),
        (6840.40, 6808.40, 6744.40, 6680.40, 6584.40, 6456.40, 6424.40, 6392.40),
        (10680.40, 10648.40, 10584.40, 10520.40, 10424.40, 10296.40, 10264.40, 10232.40),
    )
    of.write("$FREQ;\n")
    of.write(" def OTT;\n")
    ch = 0
    for band in bands:
        # Each band appears twice: once per IF/polarization group of 8 channels.
        for _ in range(2):
            for freq in band:
                ch += 1
                of.write(" chan_def = &X : {0:.2f} MHz : L : 32.000 MHz : "
                         "&Ch{1:02d} : &BBC{1:02d} : &L_cal;\n".format(freq, ch))
    of.write(" sample_rate = 64.0 Ms/sec;\n")
    of.write(" enddef;\n")
def fillif(of):
    """Write the $IF section: two IFs (1N = X pol, 3N = Y pol), both with an
    8080.0 MHz LO, upper sideband, 5 MHz phase-cal spacing."""
    of.write("$IF;\n")
    of.write(" def OTT;\n")
    for label, pol in (("1N", "X"), ("3N", "Y")):
        of.write(" if_def = &IF_{0} : {0} : {1} : 8080.0 MHz : U : 5 MHz : 0 Hz;\n".format(label, pol))
    of.write(" enddef;\n")
def getskd(exp):
    """Fetch the sked schedule file <exp>.skd from host 'fulla' into the cwd."""
    remote = "fulla:/usr2/sched/{0}.skd".format(exp)
    run(["scp", remote, "."])
def getfslogs(exp):
    """Fetch the field-system logs for both stations into the cwd:
    <exp>oe.log from fulla and <exp>ow.log from freja."""
    for host, station in (("fulla", "oe"), ("freja", "ow")):
        run(["scp", "{0}:/usr2/log/{1}{2}.log".format(host, exp, station), "."])
def fillclock(of, fslog):
    """Fit a linear clock model to GPS-vs-formatter offsets in FS log *fslog*
    and append a $CLOCK 'def' line for the station to *of*.

    The two-letter station code is taken from the log file name
    (e.g. '...oe.log' -> 'Oe'); that station's peculiar offset is added to
    the fitted clock value. Values are written in the 'clock early' sign
    convention.

    Fix vs. original: the '!dbe_gps_offset?' branch used re.split() without
    're' being imported (NameError on DBBC3-style logs) -- imported locally
    here. The parse-loop variable is renamed from 'time' to 'ts' to stop
    shadowing the imported 'time' module.
    """
    import re  # local import: 're' is missing from this script's top-level imports
    # Peculiar (instrumental) offsets in microseconds, with provenance notes.
    peculiaroff = {"Ow": [6.183, "from https://github.com/whi-llc/adjust/blob/files/data/bb_po_v1.1.dat"],
                   "Oe": [6.211, "from https://github.com/whi-llc/adjust/blob/files/data/bb_po_v1.1.dat"],
                   "On": [1.350, "from multiple exp from Bonn"],
                   "Is": [1.268, "from https://github.com/whi-llc/adjust/blob/files/data/bb_po_v1.1.dat"],
                   "Yj": [-0.108, "from https://github.com/whi-llc/adjust/blob/files/data/bb_po_v1.1.dat"],
                   "O8": [5.18, "From Bob C, EVN: O8 = +5.78, On = +1.95 --> O8 = 5.78-(1.95-1.350)=5.18"]
                   }
    vals = []
    times = []
    for l in open(fslog):
        if ("/gps-fmout/" in l) or ("/gps-maser/" in l) or ("/gps-dbbcout2/" in l):
            ls = l.split("/")
            ts = datetime.datetime.strptime(ls[0], "%Y.%j.%H:%M:%S.%f")
            val = -float(ls[2])  # negative, as fmout-gps is the "clock early" convention
            vals.append(val)  # Seconds
            times.append(ts)
        elif ("/fmout-gps/" in l):
            ls = l.split("/")
            ts = datetime.datetime.strptime(ls[0], "%Y.%j.%H:%M:%S.%f")
            val = float(ls[2])  # pos, as fmout-gps is the "clock early" convention
            vals.append(val)  # Seconds
            times.append(ts)
        elif ("!dbe_gps_offset?" in l):
            # DBBC3-style query reply; split on '/', '?0:' or ';'.
            ls = re.split(r"/|[?]0:|;", l.strip())
            ts = datetime.datetime.strptime(ls[0], "%Y.%j.%H:%M:%S.%f")
            val = float(ls[3])
            vals.append(val)  # Seconds
            times.append(ts)
    vals = np.array(vals)
    times = np.array(times)
    # Filter outliers more than 10 sigma from the mean.
    avg = np.average(vals)
    std = np.std(vals)
    diff = np.abs(vals - avg)
    cut = 10 * std
    # Filtering should really be done first fitting once without filters, removing
    # linear trend, then filter outliers. But this works well enough for Onsala
    # big jumps as is.
    bad = np.where(diff > cut)
    vals = np.delete(vals, bad)
    times = np.delete(times, bad)
    x = mdates.date2num(times)  # decimal days
    pf = np.polyfit(x, vals, 1)  # linear model: pf[0] is slope in s/day
    p = np.poly1d(pf)
    xx = np.linspace(x.min(), x.max(), 100)
    dd = mdates.num2date(xx)
    fn = os.path.basename(fslog)
    # Station code from the log name, e.g. '...oe.log' -> 'Oe'.
    station = fn[-6:-5].upper() + fn[-5:-4].lower()
    # Make valid range 30 min before first ref point, just in case first point
    # is after first scan.
    valtime = (dd[0] + datetime.timedelta(minutes=-30)).strftime("%Yy%jd%Hh%Mm%Ss")
    reftime = (dd[0]).strftime("%Yy%jd%Hh%Mm%Ss")  # Integer seconds; we don't need more precision
    # Get fitted clock, add peculiar offset
    pecoff = peculiaroff[station]
    refclock = p(xx.min()) + pecoff[0] * 1e-6
    rate = pf[0] / (24 * 3600)  # convert from s/day to s/s
    of.write("*" + station + ": Clock without peculiar offset: {0} us\n".format(p(xx.min()) * 1e6))
    of.write("* valid from clock_early clock_early_epoch rate\n")
    of.write("def {:s}; clock_early = {:s} : {:.3f} usec : {:s} : {:.3f}e-12; enddef;\n".format(station, valtime, refclock * 1e6, reftime, rate * 1e12))
def fillEOP(of, start):
    """Fetch Earth-orientation parameters and copy them into *of*.

    Runs geteop.pl for 5 days starting at *start* (a 'YYYY-DOY' string), which
    is expected to leave its result in ./EOP.txt; that file is then appended
    line by line to *of*.

    Fix vs. original: EOP.txt is now opened via a context manager so the file
    handle is closed deterministically.
    """
    # NOTE(review): the geteop.pl exit status is ignored -- if the fetch fails,
    # a stale EOP.txt would be copied silently; confirm this best-effort
    # behavior is intended.
    os.system('EMAIL_ADDR=<EMAIL> geteop.pl ' + start + ' 5')
    with open("EOP.txt") as eop:
        for el in eop:
            of.write(el)
def skd2vex(exp):
    """Convert <exp>.skd to <exp>.vex by driving the interactive sked program."""
    outvex = "{0}.vex".format(exp)
    if os.path.exists(outvex):
        os.remove(outvex)
    # Convert SKD to VEX: feed sked the 'VEC <file>' command, then 'q' to quit.
    commands = "VEC " + outvex + "\rq\r"
    run(["/opt/sked/sked", exp + ".skd"], stdout=PIPE, input=commands, encoding="ascii")
def makev2d(exp):
    """Write the DiFX control file <exp>.v2d for an Oe/Ow correlation.

    The file contains the global setup, one DATASTREAM section per VDIF
    data stream (8 streams per station, oe0..oe7 then ow0..ow7) and one
    ANTENNA section per station (Ow first, then Oe), exactly as the
    original hand-unrolled version produced.

    Fixes: the output file is now closed deterministically via ``with``
    (the original never called close()), and the 16 identical DATASTREAM
    sections are generated by loops instead of copy-paste.

    Args:
        exp (str): experiment name; output is written to <exp>.v2d.
    """
    with open(exp + ".v2d", 'w') as vf:
        vf.write("vex = {0}.vex\n".format(exp))
        vf.write("antennas = Oe, Ow\n")
        vf.write("nCore=12\n")
        vf.write("nThread=1\n")
        vf.write("# Ensure we get cross-auto corrs, just in case (i.e. Oe X-pol correlated with Oe Y-pol)\n")
        vf.write("exhaustiveAutocorrs = true\n")
        vf.write("SETUP default\n")
        vf.write("{\n")
        vf.write("  tInt=1\n")
        vf.write("  # High res to be able to notch-filter RFI on Oe-Ow baseline\n")
        vf.write("  fftSpecRes=0.1 \n")
        vf.write("  specRes=0.1\n")
        vf.write("}\n")
        # One DATASTREAM section per VDIF stream; 8 streams per station,
        # each reading its filelist produced by listfiles().
        for station in ("oe", "ow"):
            for stream in range(8):
                vf.write("DATASTREAM {0}{1}\n".format(station, stream))
                vf.write("{\n")
                vf.write("  format = VDIF/8032/2\n")
                vf.write("  filelist = {0}{1}.files\n".format(station, stream))
                vf.write("}\n")
            vf.write("\n")
        # ANTENNA sections: Ow first, then Oe (order preserved from the
        # original), each bundling that station's 8 datastreams.
        for i, (antenna, station) in enumerate((("Ow", "ow"), ("Oe", "oe"))):
            if i > 0:
                vf.write("\n")
            vf.write("ANTENNA {0}\n".format(antenna))
            vf.write("{\n")
            vf.write("  datastreams = " + ", ".join(
                "{0}{1}".format(station, s) for s in range(8)) + "\n")
            vf.write("  sampling = REAL\n")
            vf.write("  toneSelection = all\n")
            vf.write("  phaseCalInt = 5\n")
            vf.write("  }\n")
def makemachines():
    """Create a DiFX .machines file next to every *.input file in the CWD.

    Each .machines file lists the current hostname once per MPI process
    (24 lines), matching the nCore/nThread setup written by makev2d().
    """
    hostname = socket.gethostname()
    for input_file in glob.glob("*.input"):
        base = input_file.split(".")[0]
        with open(base + ".machines", "w") as machines:
            machines.writelines(hostname + "\n" for _ in range(24))
def mountfiles(exp):
    # (Re)mount the recorded data for experiment *exp*:
    #   1. remount the local flexbuff data via vbs_fs, showing only files
    #      whose names start with the experiment name,
    #   2. do the same remotely on host 'gyller' over ssh,
    #   3. expose gyller's mount locally through sshfs.
    # Each mount is preceded by the matching unmount so stale FUSE mounts
    # do not block; a failing unmount is harmless (nothing was mounted).
    umount = "fusermount -u /mnt/fmdata"
    mount = "vbs_fs /mnt/fmdata -I '{0}*'".format(exp)
    gumount = "ssh oper@gyller " + umount
    gmount = "ssh oper@gyller " + mount
    usshfs = "fusermount -u /mnt/gyller-fmdata"
    sshfs = "sshfs oper@10.100.0.15:/mnt/fmdata /mnt/gyller-fmdata"
    for c in [umount, mount, gumount, gmount, usshfs, sshfs]:
        print("Running command " + c)
        os.system(c)
        time.sleep(1)  # give FUSE a moment to settle before the next step
def listfiles(exp):
    """Index the recorded VDIF data with vsum and write the filelists.

    Produces one filelist per data stream: ow0..ow7.files from the local
    vbs_fs mount, then oe0..oe7.files through the sshfs mount of gyller.
    These filelists are referenced by the DATASTREAM sections in the .v2d.
    """
    print("NOTE: Will index all data with vsum. May take 10 minutes or so...")
    # Ow streams live on the local mount.
    for stream in range(8):
        cmd = "vsum -s /mnt/fmdata/{0}_ow*_{1} > ow{1}.files".format(exp, stream)
        print("Running command " + cmd)
        os.system(cmd)
    # Oe streams are reached via the sshfs mount of gyller.
    for stream in range(8):
        cmd = "vsum -s /mnt/gyller-fmdata/{0}_oe*_{1} > oe{1}.files".format(exp, stream)
        print("Running command " + cmd)
        os.system(cmd)
    print("...done indexing data!")
## SCRIPT STARTS HERE
# Usage: <script> <experiment-name>.  Fetches FS logs and the schedule,
# rewrites the VEX file with station-specific sections, and prepares a
# DiFX correlation (v2d, mounts, filelists, delay model, machines files).
exp = sys.argv[1]
ans = input("Will run preparation actions for experiment " + exp + ". NOTE: This may unmount file-systems causing loss of paths and I/O limits. So, NEVER DO THIS WHEN RECORDING OR CORRELATING unless you know what you are doing! Type 'yes' to proceed:")
if not ans.lower()=="yes":
    print("Did not get yes, aborting")
    sys.exit(1)
getfslogs(exp)
getskd(exp)
skd2vex(exp)
# Read all lines of VEX file
vex = [l for l in open(exp+".vex")]
# Rewrite the VEX in place: while 'keep' is True lines are copied through;
# inside the $MODE/$BBC/$FREQ/$IF/$TRACKS sections 'keep' is False and the
# section body is replaced by the station-specific fill*() content.
keep = True
of = open(exp+".vex","w")
start = ""
for line in vex:
    if "begin $MODE" in line:
        keep=False
        fillmode(of)
    if "end $MODE" in line:
        keep=True
    if "begin $BBC" in line:
        keep=False
        fillbbc(of)
    if "end $BBC" in line:
        keep=True
    if "begin $FREQ" in line:
        keep=False
        fillfreq(of)
    if "end $FREQ" in line:
        keep=True
    if "begin $IF" in line:
        keep=False
        fillif(of)
    if "end $IF" in line:
        keep=True
    if "begin $TRACKS" in line:
        keep=False
        filltracks(of)
    if "end $TRACKS" in line:
        # Append the fitted $CLOCK section (one entry per station log)
        # and the EOP data right after the $TRACKS section.
        of.write(line)
        of.write("$CLOCK;\n")
        fillclock(of, exp+"oe.log")
        fillclock(of, exp+"ow.log")
        fillEOP(of, start)
        # NOTE(review): 'keep' is never reset to True here, so any content
        # after the $TRACKS section in the input VEX is dropped -- confirm
        # $TRACKS is always the last section in sked's output.
    if keep:
        of.write(line)
    if "start = " in line and start=="":
        # Remember the experiment start date as YYYY-DOY, shifted two days
        # earlier; presumably margin for the EOP fetch -- TODO confirm.
        year = line.split()[2][0:4]
        doy = str(int(line.split()[2][5:8])-2)
        start = year+"-"+doy
of.close()
makev2d(exp)
mountfiles(exp)
listfiles(exp)
os.system("vex2difx -v -v -v -d "+exp+".v2d")
# Ensure that the CalcServer is running: will restart if already exists
os.system("startCalcServer")
os.system("calcif2 *.calc")
makemachines()
print("SCRIPT FINISHED. Check the output. If all seems OK, start correlation (in a screen!) by running 'startdifx -n -f *.input -v'")
|
en
| 0.841928
|
#!/usr/bin/env python3 # negative, as fmout-gps is the "clock early" convention # Seconds # pos, as fmout-gps is the "clock early" convention # Seconds # Seconds # Filter outliers # Filtering should really be done first fitting once without filters, removing linear trend, then filter outliers. # But this works well enough for Onsala big jumps as is. # decimal days # Make valid range 30 min before first ref point, just in case first point is after first scan # Integer seconds; we don't need more precision # Get fitted clock, add peculiar offset # convert to s/s # Convert SKD to VEX # High res to be able to notch-filter RFI on Oe-Ow baseline\n") ## SCRIPT STARTS HERE # Read all lines of VEX file # Ensure that the CalcServer is running: will restart if already exists
| 2.236912
| 2
|
groomer.py
|
project-mynt/wallet-groomer
| 0
|
6625609
|
<filename>groomer.py<gh_stars>0
#!/usr/bin/python2
# simple cleanup script, 2012-12-25 <<EMAIL>>
# 2018: updated by brianmct
import sys
import operator
from decimal import *
from bitcoinrpc.authproxy import AuthServiceProxy
import argparse
# Command-line interface: thresholds controlling which UTXOs get merged.
parser = argparse.ArgumentParser(description='This script generates transaction(s) to cleanup your wallet.\n'
    'It looks for the single addresses which have the most small confirmed payments made to them and merges\n'
    'all those payments, along with those for any addresses which are all tiny payments, to a single txout.\n'
    'It must connect to mynt to inspect your wallet and to get fresh addresses to pay your coin to.')
parser.add_argument('rpc_server', type=str, help='Wallet RPC server info. '
    'Example: http://user:password@127.0.0.1:8766')
parser.add_argument('-i', '--max_amt_input', type=float, default=25,
    help='The maximum input amount of a single transaction to consolidate (default: 25 MYNT)')
parser.add_argument('-n', '--max_num_tx', type=int, default=500,
    help='The maximum number of transactions to consolidate at once. Lower this if you are getting a tx-size error (default: 500)')
parser.add_argument('-o', '--max_amt_per_output', type=float, default=10000,
    help='The maximum amount (in MYNT) to send to a single output address (default: 10000 MYNT)')
parser.add_argument('-f', '--fee', type=float, default=0.001,
    help='The amount of fees (in MYNT) to use for the transaction')
args = parser.parse_args()
# Connect to the wallet's JSON-RPC endpoint; abort early if unreachable.
try:
    b = AuthServiceProxy(args.rpc_server)
    b.getinfo()
except:
    print "Couldn't connect to mynt"
    exit(1)
min_fee=Decimal(args.fee)
# Loop until wallet is clean
while True:
    #Add up the number of small txouts and amounts assigned to each address.
    # scripts maps scriptPubKey -> (small_txout_count, total_amount, txout_count)
    coins=b.listunspent(1,99999999)
    scripts={}
    for coin in coins:
        script=coin['scriptPubKey']
        if script not in scripts:
            scripts[script]=(0,Decimal(0),0)
        # "small" = deeply confirmed (>100), at least 0.01 and below max_amt_input.
        if (coin['amount']<Decimal(args.max_amt_input) and coin['amount']>=Decimal(0.01) and coin['confirmations']>100):
            scripts[script]=(scripts[script][0]+1,scripts[script][1]+coin['amount'],scripts[script][0]+1)
        else:
            scripts[script]=(scripts[script][0],scripts[script][1]+coin['amount'],scripts[script][0]+1)
    # NOTE(review): both branches above set the third tuple slot from
    # scripts[script][0]+1; the else-branch probably intended
    # scripts[script][2]+1 (total txout count) -- verify before trusting
    # the counts printed/tested below.
    #which script has the largest number of well confirmed small but not dust outputs?
    # (tuples compare lexicographically: count first, then amount)
    most_overused = max(scripts.iteritems(), key=operator.itemgetter(1))[0]
    #If the best we can do doesn't reduce the number of txouts or just moves dust, give up.
    if(scripts[most_overused][2]<3 or scripts[most_overused][1]<Decimal(0.01)):
        print "Wallet already clean."
        exit(0)
    usescripts=set([most_overused])
    #Also merge in scripts that are all dust, since they can't be spent without merging with something.
    for script in scripts.keys():
        if scripts[script][1]<Decimal(0.00010000):
            usescripts.add(script)
    amt=Decimal(0)
    txouts=[]
    # Collect up to max_num_tx spendable inputs from the selected scripts.
    for coin in coins:
        if len(txouts) >= args.max_num_tx:
            break
        if coin['scriptPubKey'] in usescripts:
            amt+=coin['amount']
            txout={}
            txout['txid']=coin['txid']
            txout['vout']=coin['vout']
            txouts.append(txout)
    print 'Creating tx from %d inputs of total value %s:'%(len(txouts),amt)
    for script in usescripts:
        print ' Script %s has %d txins and %s MYNT value.'%(script,scripts[script][2],str(scripts[script][1]))
    out={}
    na=amt-min_fee
    #One new output per max_amt_per_output MYNT of value to avoid consolidating too much value in too few addresses.
    # But don't add an extra output if it would have less than args.max_amt_per_output MYNT.
    while na>0:
        amount=min(Decimal(args.max_amt_per_output),na)
        if ((na-amount)<10):
            amount=na
        addr=b.getnewaddress('consolidate')
        if (Decimal(str(float(amount)))>0):
            if addr not in out:
                out[addr]=float(0)
            out[addr]+=float(amount)
        na-=Decimal(str(float(amount)))
    print 'Paying %s MYNT (%s fee) to:'%(sum([Decimal(str(out[k])) for k in out.keys()]),amt-sum([Decimal(str(out[k])) for k in out.keys()]))
    for o in out.keys():
        print ' %s %s'%(o,out[o])
    txn=b.createrawtransaction(txouts,out)
    # Two interactive confirmations: first to sign, then to broadcast.
    a = raw_input('Sign the transaction? y/[n]: ')
    if a != 'y':
        exit(0)
    signed_txn=b.signrawtransaction(txn)
    print signed_txn
    # len(hex)/2 = byte size of the serialized transaction.
    print 'Bytes: %d Fee: %s'%(len(signed_txn['hex'])/2,amt-sum([Decimal(str(out[x])) for x in out.keys()]))
    a = raw_input('Send the transaction? y/[n]: ')
    if a != 'y':
        exit(0)
    txid = b.sendrawtransaction(signed_txn['hex'])
    print 'Transaction sent! txid: %s\n' % txid
|
<filename>groomer.py<gh_stars>0
#!/usr/bin/python2
# simple cleanup script, 2012-12-25 <<EMAIL>>
# 2018: updated by brianmct
import sys
import operator
from decimal import *
from bitcoinrpc.authproxy import AuthServiceProxy
import argparse
parser = argparse.ArgumentParser(description='This script generates transaction(s) to cleanup your wallet.\n'
'It looks for the single addresses which have the most small confirmed payments made to them and merges\n'
'all those payments, along with those for any addresses which are all tiny payments, to a single txout.\n'
'It must connect to mynt to inspect your wallet and to get fresh addresses to pay your coin to.')
parser.add_argument('rpc_server', type=str, help='Wallet RPC server info. '
'Example: http://user:password@127.0.0.1:8766')
parser.add_argument('-i', '--max_amt_input', type=float, default=25,
help='The maximum input amount of a single transaction to consolidate (default: 25 MYNT)')
parser.add_argument('-n', '--max_num_tx', type=int, default=500,
help='The maximum number of transactions to consolidate at once. Lower this if you are getting a tx-size error (default: 500)')
parser.add_argument('-o', '--max_amt_per_output', type=float, default=10000,
help='The maximum amount (in MYNT) to send to a single output address (default: 10000 MYNT)')
parser.add_argument('-f', '--fee', type=float, default=0.001,
help='The amount of fees (in MYNT) to use for the transaction')
args = parser.parse_args()
try:
b = AuthServiceProxy(args.rpc_server)
b.getinfo()
except:
print "Couldn't connect to mynt"
exit(1)
min_fee=Decimal(args.fee)
# Loop until wallet is clean
while True:
#Add up the number of small txouts and amounts assigned to each address.
coins=b.listunspent(1,99999999)
scripts={}
for coin in coins:
script=coin['scriptPubKey']
if script not in scripts:
scripts[script]=(0,Decimal(0),0)
if (coin['amount']<Decimal(args.max_amt_input) and coin['amount']>=Decimal(0.01) and coin['confirmations']>100):
scripts[script]=(scripts[script][0]+1,scripts[script][1]+coin['amount'],scripts[script][0]+1)
else:
scripts[script]=(scripts[script][0],scripts[script][1]+coin['amount'],scripts[script][0]+1)
#which script has the largest number of well confirmed small but not dust outputs?
most_overused = max(scripts.iteritems(), key=operator.itemgetter(1))[0]
#If the best we can do doesn't reduce the number of txouts or just moves dust, give up.
if(scripts[most_overused][2]<3 or scripts[most_overused][1]<Decimal(0.01)):
print "Wallet already clean."
exit(0)
usescripts=set([most_overused])
#Also merge in scripts that are all dust, since they can't be spent without merging with something.
for script in scripts.keys():
if scripts[script][1]<Decimal(0.00010000):
usescripts.add(script)
amt=Decimal(0)
txouts=[]
for coin in coins:
if len(txouts) >= args.max_num_tx:
break
if coin['scriptPubKey'] in usescripts:
amt+=coin['amount']
txout={}
txout['txid']=coin['txid']
txout['vout']=coin['vout']
txouts.append(txout)
print 'Creating tx from %d inputs of total value %s:'%(len(txouts),amt)
for script in usescripts:
print ' Script %s has %d txins and %s MYNT value.'%(script,scripts[script][2],str(scripts[script][1]))
out={}
na=amt-min_fee
#One new output per max_amt_per_output MYNT of value to avoid consolidating too much value in too few addresses.
# But don't add an extra output if it would have less than args.max_amt_per_output MYNT.
while na>0:
amount=min(Decimal(args.max_amt_per_output),na)
if ((na-amount)<10):
amount=na
addr=b.getnewaddress('consolidate')
if (Decimal(str(float(amount)))>0):
if addr not in out:
out[addr]=float(0)
out[addr]+=float(amount)
na-=Decimal(str(float(amount)))
print 'Paying %s MYNT (%s fee) to:'%(sum([Decimal(str(out[k])) for k in out.keys()]),amt-sum([Decimal(str(out[k])) for k in out.keys()]))
for o in out.keys():
print ' %s %s'%(o,out[o])
txn=b.createrawtransaction(txouts,out)
a = raw_input('Sign the transaction? y/[n]: ')
if a != 'y':
exit(0)
signed_txn=b.signrawtransaction(txn)
print signed_txn
print 'Bytes: %d Fee: %s'%(len(signed_txn['hex'])/2,amt-sum([Decimal(str(out[x])) for x in out.keys()]))
a = raw_input('Send the transaction? y/[n]: ')
if a != 'y':
exit(0)
txid = b.sendrawtransaction(signed_txn['hex'])
print 'Transaction sent! txid: %s\n' % txid
|
en
| 0.932851
|
#!/usr/bin/python2 # simple cleanup script, 2012-12-25 <<EMAIL>> # 2018: updated by brianmct # Loop until wallet is clean #Add up the number of small txouts and amounts assigned to each address. #which script has the largest number of well confirmed small but not dust outputs? #If the best we can do doesn't reduce the number of txouts or just moves dust, give up. #Also merge in scripts that are all dust, since they can't be spent without merging with something. #One new output per max_amt_per_output MYNT of value to avoid consolidating too much value in too few addresses. # But don't add an extra output if it would have less than args.max_amt_per_output MYNT.
| 2.476896
| 2
|
stacks_queques/flood_fill.py
|
sumitsk/leetcode
| 0
|
6625610
|
class Solution(object):
    def floodFill(self, image, sr, sc, newColor):
        """Recolor the 4-connected region containing (sr, sc), in place.

        Iterative DFS with an explicit stack and a visited matrix, so it
        terminates even when newColor equals the region's original color.

        :type image: List[List[int]]
        :type sr: int
        :type sc: int
        :type newColor: int
        :rtype: List[List[int]]
        """
        target = image[sr][sc]
        rows, cols = len(image), len(image[0])
        visited = [[False] * cols for _ in range(rows)]
        stack = [(sr, sc)]
        while stack:
            r, c = stack.pop()
            image[r][c] = newColor
            if visited[r][c]:
                continue
            visited[r][c] = True
            for nr, nc in self.find_neighbors((r, c), rows, cols):
                if image[nr][nc] == target and not visited[nr][nc]:
                    stack.append((nr, nc))
        return image

    def find_neighbors(self, cell, num_rows, num_cols):
        """Return the in-bounds 4-connected neighbours of *cell*."""
        r, c = cell
        candidates = ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1))
        return [(nr, nc) for nr, nc in candidates
                if 0 <= nr < num_rows and 0 <= nc < num_cols]
|
class Solution(object):
def floodFill(self, image, sr, sc, newColor):
"""
:type image: List[List[int]]
:type sr: int
:type sc: int
:type newColor: int
:rtype: List[List[int]]
"""
org_color = image[sr][sc]
num_rows, num_cols = len(image), len(image[0])
done = [[0 for _ in range(num_cols)] for _ in range(num_rows)]
openlist = [(sr,sc)]
while len(openlist)>0:
cell = openlist.pop()
image[cell[0]][cell[1]] = newColor
if not done[cell[0]][cell[1]]:
nghs = self.find_neighbors(cell, num_rows, num_cols)
for ngh in nghs:
if image[ngh[0]][ngh[1]]==org_color and not done[ngh[0]][ngh[1]]:
openlist.append(ngh)
done[cell[0]][cell[1]] = 1
return image
def find_neighbors(self, cell, num_rows, num_cols):
dxdy = [(1,0), (-1,0), (0,1), (0,-1)]
ans = []
for dx,dy in dxdy:
ngh = (cell[0]+dx, cell[1]+dy)
if 0<=ngh[0]<num_rows and 0<=ngh[1]<num_cols:
ans.append(ngh)
return ans
|
en
| 0.197573
|
:type image: List[List[int]] :type sr: int :type sc: int :type newColor: int :rtype: List[List[int]]
| 3.312695
| 3
|
kartothek/io/dask/_update.py
|
jonashaag/kartothek
| 0
|
6625611
|
<gh_stars>0
# -*- coding: utf-8 -*-
from functools import partial
from typing import List
import numpy as np
import pandas as pd
from kartothek.io_components.metapartition import (
MetaPartition,
parse_input_to_metapartition,
)
from kartothek.io_components.utils import sort_values_categorical
from ._utils import map_delayed
_KTK_HASH_BUCKET = "__KTK_HASH_BUCKET"
def _hash_bucket(df: pd.DataFrame, subset: List[str], num_buckets: int):
"""
Categorize each row of `df` based on the data in the columns `subset`
into `num_buckets` values. This is based on `pandas.util.hash_pandas_object`
"""
if subset is None:
subset = df.columns
hash_arr = pd.util.hash_pandas_object(df[subset], index=False)
buckets = hash_arr % num_buckets
available_bit_widths = np.array([8, 16, 32, 64])
mask = available_bit_widths > np.log2(num_buckets)
bit_width = min(available_bit_widths[mask])
df[_KTK_HASH_BUCKET] = buckets.astype(f"uint{bit_width}")
return df
def _update_dask_partitions_shuffle(
    ddf,
    table,
    secondary_indices,
    metadata_version,
    partition_on,
    store_factory,
    df_serializer,
    dataset_uuid,
    num_buckets,
    sort_partitions_by,
    bucket_by,
):
    """Shuffle *ddf* by the first ``partition_on`` column (plus, optionally,
    a hash bucket over ``bucket_by``) and store each group as a dataset
    partition via ``_store_partition``.

    Returns *ddf* unchanged when it has no partitions; otherwise returns a
    dask object whose elements are the results of ``_store_partition``
    (declared to dask as meta ("MetaPartition", "object")).
    """
    if ddf.npartitions == 0:
        return ddf
    if num_buckets is not None:
        # Add a synthetic bucket column so each logical partition value is
        # further split into num_buckets physical groups.
        meta = ddf._meta
        meta[_KTK_HASH_BUCKET] = np.uint64(0)
        ddf = ddf.map_partitions(_hash_bucket, bucket_by, num_buckets, meta=meta)
        group_cols = [partition_on[0], _KTK_HASH_BUCKET]
    else:
        # NOTE(review): only the first partition_on column participates in
        # the shuffle here -- confirm callers guarantee len(partition_on)==1
        # or that grouping by the first column alone is intended.
        group_cols = [partition_on[0]]
    ddf = ddf.groupby(by=group_cols)
    # Each group is written out by _store_partition inside the workers.
    ddf = ddf.apply(
        partial(
            _store_partition,
            secondary_indices=secondary_indices,
            sort_partitions_by=sort_partitions_by,
            table=table,
            dataset_uuid=dataset_uuid,
            partition_on=partition_on,
            store_factory=store_factory,
            df_serializer=df_serializer,
            metadata_version=metadata_version,
        ),
        meta=("MetaPartition", "object"),
    )
    return ddf
def _update_dask_partitions_one_to_one(
    delayed_tasks,
    secondary_indices,
    metadata_version,
    partition_on,
    store_factory,
    df_serializer,
    dataset_uuid,
    sort_partitions_by,
):
    """Map each delayed input task to a stored MetaPartition (no shuffle).

    Per-task pipeline: parse input to MetaPartition -> optionally sort
    values -> optionally repartition on ``partition_on`` -> optionally
    build secondary indices -> store the dataframes.  Returns the delayed
    store results, one per input task.
    """
    input_to_mps = partial(
        parse_input_to_metapartition,
        metadata_version=metadata_version,
        expected_secondary_indices=secondary_indices,
    )
    mps = map_delayed(delayed_tasks, input_to_mps)
    if sort_partitions_by:
        mps = map_delayed(
            mps,
            MetaPartition.apply,
            partial(sort_values_categorical, column=sort_partitions_by),
        )
    if partition_on:
        mps = map_delayed(mps, MetaPartition.partition_on, partition_on)
    if secondary_indices:
        mps = map_delayed(mps, MetaPartition.build_indices, secondary_indices)
    return map_delayed(
        mps,
        MetaPartition.store_dataframes,
        store=store_factory,
        df_serializer=df_serializer,
        dataset_uuid=dataset_uuid,
    )
def _store_partition(
    df,
    secondary_indices,
    sort_partitions_by,
    table,
    dataset_uuid,
    partition_on,
    store_factory,
    df_serializer,
    metadata_version,
):
    """Store one shuffled pandas group *df* as a dataset partition.

    Runs inside a dask groupby-apply worker: wraps *df* into a
    MetaPartition under *table*, optionally sorts / partitions / indexes
    it, and writes it through a store built from *store_factory*.
    Returns the stored MetaPartition.
    """
    store = store_factory()
    # I don't have access to the group values
    mps = parse_input_to_metapartition(
        {"data": {table: df}}, metadata_version=metadata_version
    )
    # delete reference to enable release after partition_on; before index build
    del df
    if sort_partitions_by:
        mps = mps.apply(partial(sort_values_categorical, column=sort_partitions_by))
    if partition_on:
        mps = mps.partition_on(partition_on)
    if secondary_indices:
        mps = mps.build_indices(secondary_indices)
    return mps.store_dataframes(
        store=store, dataset_uuid=dataset_uuid, df_serializer=df_serializer
    )
|
# -*- coding: utf-8 -*-
from functools import partial
from typing import List
import numpy as np
import pandas as pd
from kartothek.io_components.metapartition import (
MetaPartition,
parse_input_to_metapartition,
)
from kartothek.io_components.utils import sort_values_categorical
from ._utils import map_delayed
_KTK_HASH_BUCKET = "__KTK_HASH_BUCKET"
def _hash_bucket(df: pd.DataFrame, subset: List[str], num_buckets: int):
"""
Categorize each row of `df` based on the data in the columns `subset`
into `num_buckets` values. This is based on `pandas.util.hash_pandas_object`
"""
if subset is None:
subset = df.columns
hash_arr = pd.util.hash_pandas_object(df[subset], index=False)
buckets = hash_arr % num_buckets
available_bit_widths = np.array([8, 16, 32, 64])
mask = available_bit_widths > np.log2(num_buckets)
bit_width = min(available_bit_widths[mask])
df[_KTK_HASH_BUCKET] = buckets.astype(f"uint{bit_width}")
return df
def _update_dask_partitions_shuffle(
ddf,
table,
secondary_indices,
metadata_version,
partition_on,
store_factory,
df_serializer,
dataset_uuid,
num_buckets,
sort_partitions_by,
bucket_by,
):
if ddf.npartitions == 0:
return ddf
if num_buckets is not None:
meta = ddf._meta
meta[_KTK_HASH_BUCKET] = np.uint64(0)
ddf = ddf.map_partitions(_hash_bucket, bucket_by, num_buckets, meta=meta)
group_cols = [partition_on[0], _KTK_HASH_BUCKET]
else:
group_cols = [partition_on[0]]
ddf = ddf.groupby(by=group_cols)
ddf = ddf.apply(
partial(
_store_partition,
secondary_indices=secondary_indices,
sort_partitions_by=sort_partitions_by,
table=table,
dataset_uuid=dataset_uuid,
partition_on=partition_on,
store_factory=store_factory,
df_serializer=df_serializer,
metadata_version=metadata_version,
),
meta=("MetaPartition", "object"),
)
return ddf
def _update_dask_partitions_one_to_one(
delayed_tasks,
secondary_indices,
metadata_version,
partition_on,
store_factory,
df_serializer,
dataset_uuid,
sort_partitions_by,
):
input_to_mps = partial(
parse_input_to_metapartition,
metadata_version=metadata_version,
expected_secondary_indices=secondary_indices,
)
mps = map_delayed(delayed_tasks, input_to_mps)
if sort_partitions_by:
mps = map_delayed(
mps,
MetaPartition.apply,
partial(sort_values_categorical, column=sort_partitions_by),
)
if partition_on:
mps = map_delayed(mps, MetaPartition.partition_on, partition_on)
if secondary_indices:
mps = map_delayed(mps, MetaPartition.build_indices, secondary_indices)
return map_delayed(
mps,
MetaPartition.store_dataframes,
store=store_factory,
df_serializer=df_serializer,
dataset_uuid=dataset_uuid,
)
def _store_partition(
df,
secondary_indices,
sort_partitions_by,
table,
dataset_uuid,
partition_on,
store_factory,
df_serializer,
metadata_version,
):
store = store_factory()
# I don't have access to the group values
mps = parse_input_to_metapartition(
{"data": {table: df}}, metadata_version=metadata_version
)
# delete reference to enable release after partition_on; before index build
del df
if sort_partitions_by:
mps = mps.apply(partial(sort_values_categorical, column=sort_partitions_by))
if partition_on:
mps = mps.partition_on(partition_on)
if secondary_indices:
mps = mps.build_indices(secondary_indices)
return mps.store_dataframes(
store=store, dataset_uuid=dataset_uuid, df_serializer=df_serializer
)
|
en
| 0.841186
|
# -*- coding: utf-8 -*- Categorize each row of `df` based on the data in the columns `subset` into `num_buckets` values. This is based on `pandas.util.hash_pandas_object` # I don't have access to the group values # delete reference to enable release after partition_on; before index build
| 2.154409
| 2
|
py/lvmspec/io/spectra.py
|
sdss/lvmspec
| 0
|
6625612
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
lvmspec.io.spectra
=====================
I/O routines for working with spectral grouping files.
"""
from __future__ import absolute_import, division, print_function
import os
import re
import warnings
import time
import numpy as np
import astropy.io.fits as fits
from astropy.table import Table
from lvmutil.depend import add_dependencies
from lvmutil.io import encode_table
from .util import fitsheader, native_endian, add_columns
from .frame import read_frame
from .fibermap import fibermap_comments
from ..spectra import Spectra
def write_spectra(outfile, spec, units=None):
    """
    Write Spectra object to FITS file.

    This places the metadata into the header of the (empty) primary HDU.
    The first extension contains the fibermap, and then HDUs are created for
    the different data arrays for each band.

    Flux, inverse variance, resolution and extra arrays are converted to
    32-bit floats before writing; wavelength grids are kept as 64-bit.

    Args:
        outfile (str): path to write
        spec (Spectra): the object containing the data
        units (str): optional string to use for the BUNIT key of the flux
            HDUs for each band.

    Returns:
        The absolute path to the file that was written.
    """
    outfile = os.path.abspath(outfile)
    # Create the parent directory, if necessary.
    dir, base = os.path.split(outfile)
    if not os.path.exists(dir):
        os.makedirs(dir)
    # Create HDUs from the data
    all_hdus = fits.HDUList()
    # metadata goes in empty primary HDU
    hdr = fitsheader(spec.meta)
    add_dependencies(hdr)
    all_hdus.append(fits.PrimaryHDU(header=hdr))
    # Next is the fibermap
    fmap = spec.fibermap.copy()
    fmap.meta["EXTNAME"] = "FIBERMAP"
    hdu = fits.convenience.table_to_hdu(fmap)
    # Add comments for fibermap columns.
    # The TTYPEn keywords name the table columns; attach the known comment
    # for each column that has one.
    for i, colname in enumerate(fmap.dtype.names):
        if colname in fibermap_comments:
            key = "TTYPE{}".format(i+1)
            name = hdu.header[key]
            assert name == colname
            comment = fibermap_comments[name]
            hdu.header[key] = (name, comment)
        else:
            print('Unknown comment for {}'.format(colname))
    all_hdus.append(hdu)
    # Now append the data for all bands
    # Per band: WAVELENGTH (f8), FLUX (f4), IVAR (f4), and optionally
    # MASK (uint32, compressed), RESOLUTION (f4) and any "extra" arrays.
    for band in spec.bands:
        hdu = fits.ImageHDU(name="{}_WAVELENGTH".format(band.upper()))
        hdu.header["BUNIT"] = "Angstrom"
        hdu.data = spec.wave[band].astype("f8")
        all_hdus.append(hdu)
        hdu = fits.ImageHDU(name="{}_FLUX".format(band.upper()))
        if units is None:
            hdu.header["BUNIT"] = "1e-17 erg/(s cm2 Angstrom)"
        else:
            hdu.header["BUNIT"] = units
        hdu.data = spec.flux[band].astype("f4")
        all_hdus.append(hdu)
        hdu = fits.ImageHDU(name="{}_IVAR".format(band.upper()))
        hdu.data = spec.ivar[band].astype("f4")
        all_hdus.append(hdu)
        if spec.mask is not None:
            hdu = fits.CompImageHDU(name="{}_MASK".format(band.upper()))
            hdu.data = spec.mask[band].astype(np.uint32)
            all_hdus.append(hdu)
        if spec.resolution_data is not None:
            hdu = fits.ImageHDU(name="{}_RESOLUTION".format(band.upper()))
            hdu.data = spec.resolution_data[band].astype("f4")
            all_hdus.append(hdu)
        if spec.extra is not None:
            for ex in spec.extra[band].items():
                hdu = fits.ImageHDU(name="{}_{}".format(band.upper(), ex[0]))
                hdu.data = ex[1].astype("f4")
                all_hdus.append(hdu)
    # Write to a temporary name first, then rename, so readers never see a
    # partially written file.  Older astropy spells 'overwrite' as
    # 'clobber'; fall back on TypeError for compatibility.
    try:
        all_hdus.writeto("{}.tmp".format(outfile), overwrite=True, checksum=True)
    except TypeError:
        all_hdus.writeto("{}.tmp".format(outfile), clobber=True, checksum=True)
    os.rename("{}.tmp".format(outfile), outfile)
    return outfile
def read_spectra(infile, single=False):
    """
    Read Spectra object from FITS file.

    This reads data written by the write_spectra function. A new Spectra
    object is instantiated and returned.

    Args:
        infile (str): path to read
        single (bool): if True, keep spectra as single precision in memory.

    Returns (Spectra):
        The object containing the data read from disk.
    """
    # Target float type for all floating-point arrays.
    ftype = np.float64
    if single:
        ftype = np.float32
    infile = os.path.abspath(infile)
    if not os.path.isfile(infile):
        raise IOError("{} is not a file".format(infile))
    hdus = fits.open(infile, mode="readonly")
    nhdu = len(hdus)
    # load the metadata.
    meta = dict(hdus[0].header)
    # initialize data objects
    bands = []
    fmap = None
    wave = None
    flux = None
    ivar = None
    mask = None
    res = None
    extra = None
    # For efficiency, go through the HDUs in disk-order. Use the
    # extension name to determine where to put the data. We don't
    # explicitly copy the data, since that will be done when constructing
    # the Spectra object.
    for h in range(1, nhdu):
        name = hdus[h].header["EXTNAME"]
        if name == "FIBERMAP":
            fmap = encode_table(Table(hdus[h].data, copy=True).as_array())
        else:
            # Find the band based on the name
            # EXTNAMEs follow "<BAND>_<TYPE>" as produced by write_spectra.
            mat = re.match(r"(.*)_(.*)", name)
            if mat is None:
                raise RuntimeError("FITS extension name {} does not contain the band".format(name))
            band = mat.group(1).lower()
            type = mat.group(2)
            if band not in bands:
                bands.append(band)
            # Dictionaries are created lazily, keyed by band.
            if type == "WAVELENGTH":
                if wave is None:
                    wave = {}
                wave[band] = native_endian(hdus[h].data.astype(ftype))
            elif type == "FLUX":
                if flux is None:
                    flux = {}
                flux[band] = native_endian(hdus[h].data.astype(ftype))
            elif type == "IVAR":
                if ivar is None:
                    ivar = {}
                ivar[band] = native_endian(hdus[h].data.astype(ftype))
            elif type == "MASK":
                if mask is None:
                    mask = {}
                mask[band] = native_endian(hdus[h].data.astype(np.uint32))
            elif type == "RESOLUTION":
                if res is None:
                    res = {}
                res[band] = native_endian(hdus[h].data.astype(ftype))
            else:
                # this must be an "extra" HDU
                # Any other <BAND>_<NAME> extension becomes extra[band][NAME].
                if extra is None:
                    extra = {}
                if band not in extra:
                    extra[band] = {}
                extra[band][type] = native_endian(hdus[h].data.astype(ftype))
    # Construct the Spectra object from the data. If there are any
    # inconsistencies in the sizes of the arrays read from the file,
    # they will be caught by the constructor.
    spec = Spectra(bands, wave, flux, ivar, mask=mask, resolution_data=res,
        fibermap=fmap, meta=meta, extra=extra, single=single)
    hdus.close()
    return spec
def read_frame_as_spectra(filename, night, expid, band, single=False):
    """
    Read a FITS file containing a Frame and return a Spectra.

    A Frame file is very close to a Spectra object (by design), and
    only differs by missing the NIGHT and EXPID in the fibermap, as
    well as containing only one band of data.

    Args:
        filename (str): path to read
        night (int): the night value to use for all rows of the fibermap.
        expid (int): the expid value to use for all rows of the fibermap.
        band (str): the name of this band.
        single (bool): if True, keep spectra as single precision in memory.

    Returns (Spectra):
        The object containing the data read from disk.
    """
    fr = read_frame(filename)
    if fr.fibermap is None:
        raise RuntimeError("reading Frame files into Spectra only supported if a fibermap exists")
    # NOTE(review): nspec is computed but never used.
    nspec = len(fr.fibermap)
    fmap = np.asarray(fr.fibermap.copy())
    # Add the columns a Frame fibermap lacks; include TILEID when present
    # in the frame header.
    if 'TILEID' in fr.meta:
        fmap = add_columns(fmap,
            ['NIGHT', 'EXPID', 'TILEID'],
            [np.int32(night), np.int32(expid), np.int32(fr.meta['TILEID'])],
            )
    else:
        fmap = add_columns(fmap,
            ['NIGHT', 'EXPID'],
            [np.int32(night), np.int32(expid)],
            )
    fmap = encode_table(fmap)
    bands = [ band ]
    # Wrap the single-band frame arrays into per-band dictionaries; the
    # optional arrays stay None when the frame does not carry them.
    mask = None
    if fr.mask is not None:
        mask = {band : fr.mask}
    res = None
    if fr.resolution_data is not None:
        res = {band : fr.resolution_data}
    extra = None
    if fr.chi2pix is not None:
        extra = {band : {"CHI2PIX" : fr.chi2pix}}
    spec = Spectra(bands, {band : fr.wave}, {band : fr.flux}, {band : fr.ivar},
        mask=mask, resolution_data=res, fibermap=fmap, meta=fr.meta,
        extra=extra, single=single)
    return spec
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
lvmspec.io.spectra
=====================
I/O routines for working with spectral grouping files.
"""
from __future__ import absolute_import, division, print_function
import os
import re
import warnings
import time
import numpy as np
import astropy.io.fits as fits
from astropy.table import Table
from lvmutil.depend import add_dependencies
from lvmutil.io import encode_table
from .util import fitsheader, native_endian, add_columns
from .frame import read_frame
from .fibermap import fibermap_comments
from ..spectra import Spectra
def write_spectra(outfile, spec, units=None):
    """
    Write Spectra object to FITS file.

    This places the metadata into the header of the (empty) primary HDU.
    The first extension contains the fibermap, and then HDUs are created for
    the different data arrays for each band.

    Floating point data is converted to 32 bits before writing.

    Args:
        outfile (str): path to write.
        spec (Spectra): the object containing the data.
        units (str): optional string to use for the BUNIT key of the flux
            HDUs for each band.

    Returns:
        The absolute path to the file that was written.
    """
    outfile = os.path.abspath(outfile)

    # Create the parent directory, if necessary.
    # (renamed from "dir", which shadowed the builtin; the basename was unused)
    outdir = os.path.dirname(outfile)
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    # Create HDUs from the data
    all_hdus = fits.HDUList()

    # metadata goes in empty primary HDU
    hdr = fitsheader(spec.meta)
    add_dependencies(hdr)
    all_hdus.append(fits.PrimaryHDU(header=hdr))

    # Next is the fibermap
    fmap = spec.fibermap.copy()
    fmap.meta["EXTNAME"] = "FIBERMAP"
    hdu = fits.convenience.table_to_hdu(fmap)

    # Add comments for fibermap columns.  TTYPEn keywords are 1-indexed and
    # follow the column order of the table.
    for i, colname in enumerate(fmap.dtype.names):
        if colname in fibermap_comments:
            key = "TTYPE{}".format(i+1)
            name = hdu.header[key]
            assert name == colname
            comment = fibermap_comments[name]
            hdu.header[key] = (name, comment)
        else:
            print('Unknown comment for {}'.format(colname))

    all_hdus.append(hdu)

    # Now append the data for all bands
    for band in spec.bands:
        hdu = fits.ImageHDU(name="{}_WAVELENGTH".format(band.upper()))
        hdu.header["BUNIT"] = "Angstrom"
        hdu.data = spec.wave[band].astype("f8")
        all_hdus.append(hdu)

        hdu = fits.ImageHDU(name="{}_FLUX".format(band.upper()))
        if units is None:
            hdu.header["BUNIT"] = "1e-17 erg/(s cm2 Angstrom)"
        else:
            hdu.header["BUNIT"] = units
        hdu.data = spec.flux[band].astype("f4")
        all_hdus.append(hdu)

        hdu = fits.ImageHDU(name="{}_IVAR".format(band.upper()))
        hdu.data = spec.ivar[band].astype("f4")
        all_hdus.append(hdu)

        # Mask, resolution and "extra" HDUs are optional per band.
        if spec.mask is not None:
            hdu = fits.CompImageHDU(name="{}_MASK".format(band.upper()))
            hdu.data = spec.mask[band].astype(np.uint32)
            all_hdus.append(hdu)

        if spec.resolution_data is not None:
            hdu = fits.ImageHDU(name="{}_RESOLUTION".format(band.upper()))
            hdu.data = spec.resolution_data[band].astype("f4")
            all_hdus.append(hdu)

        if spec.extra is not None:
            for exname, exdata in spec.extra[band].items():
                hdu = fits.ImageHDU(name="{}_{}".format(band.upper(), exname))
                hdu.data = exdata.astype("f4")
                all_hdus.append(hdu)

    # Write to a temporary file first, then rename, so readers never see a
    # partially-written file.  Older astropy spells "overwrite" as "clobber".
    try:
        all_hdus.writeto("{}.tmp".format(outfile), overwrite=True, checksum=True)
    except TypeError:
        all_hdus.writeto("{}.tmp".format(outfile), clobber=True, checksum=True)
    os.rename("{}.tmp".format(outfile), outfile)

    return outfile
def read_spectra(infile, single=False):
    """
    Read Spectra object from FITS file.

    This reads data written by the write_spectra function.  A new Spectra
    object is instantiated and returned.

    Args:
        infile (str): path to read.
        single (bool): if True, keep spectra as single precision in memory.

    Returns (Spectra):
        The object containing the data read from disk.

    Raises:
        IOError: if infile does not exist.
        RuntimeError: if an extension name does not encode a band.
    """
    ftype = np.float64
    if single:
        ftype = np.float32

    infile = os.path.abspath(infile)
    if not os.path.isfile(infile):
        raise IOError("{} is not a file".format(infile))

    hdus = fits.open(infile, mode="readonly")
    # Ensure the file handle is closed even if parsing or the Spectra
    # constructor raises (the original leaked the handle on error).
    try:
        nhdu = len(hdus)

        # load the metadata.
        meta = dict(hdus[0].header)

        # initialize data objects
        bands = []
        fmap = None
        wave = None
        flux = None
        ivar = None
        mask = None
        res = None
        extra = None

        # For efficiency, go through the HDUs in disk-order.  Use the
        # extension name to determine where to put the data.  We don't
        # explicitly copy the data, since that will be done when constructing
        # the Spectra object.
        for h in range(1, nhdu):
            name = hdus[h].header["EXTNAME"]
            if name == "FIBERMAP":
                fmap = encode_table(Table(hdus[h].data, copy=True).as_array())
            else:
                # Find the band based on the name, e.g. "B_FLUX" -> ("b", "FLUX").
                mat = re.match(r"(.*)_(.*)", name)
                if mat is None:
                    raise RuntimeError("FITS extension name {} does not contain the band".format(name))
                band = mat.group(1).lower()
                # renamed from "type", which shadowed the builtin
                exttype = mat.group(2)
                if band not in bands:
                    bands.append(band)
                if exttype == "WAVELENGTH":
                    if wave is None:
                        wave = {}
                    wave[band] = native_endian(hdus[h].data.astype(ftype))
                elif exttype == "FLUX":
                    if flux is None:
                        flux = {}
                    flux[band] = native_endian(hdus[h].data.astype(ftype))
                elif exttype == "IVAR":
                    if ivar is None:
                        ivar = {}
                    ivar[band] = native_endian(hdus[h].data.astype(ftype))
                elif exttype == "MASK":
                    if mask is None:
                        mask = {}
                    mask[band] = native_endian(hdus[h].data.astype(np.uint32))
                elif exttype == "RESOLUTION":
                    if res is None:
                        res = {}
                    res[band] = native_endian(hdus[h].data.astype(ftype))
                else:
                    # this must be an "extra" HDU
                    if extra is None:
                        extra = {}
                    if band not in extra:
                        extra[band] = {}
                    extra[band][exttype] = native_endian(hdus[h].data.astype(ftype))

        # Construct the Spectra object from the data.  If there are any
        # inconsistencies in the sizes of the arrays read from the file,
        # they will be caught by the constructor.
        spec = Spectra(bands, wave, flux, ivar, mask=mask, resolution_data=res,
            fibermap=fmap, meta=meta, extra=extra, single=single)
    finally:
        hdus.close()

    return spec
def read_frame_as_spectra(filename, night, expid, band, single=False):
    """
    Read a FITS file containing a Frame and return a Spectra.

    A Frame file is very close to a Spectra object (by design), and
    only differs by missing the NIGHT and EXPID in the fibermap, as
    well as containing only one band of data.

    Args:
        filename (str): path to read.
        night (int): the night value to use for all rows of the fibermap.
        expid (int): the expid value to use for all rows of the fibermap.
        band (str): the name of this band.
        single (bool): if True, keep spectra as single precision in memory.

    Returns (Spectra):
        The object containing the data read from disk.

    Raises:
        RuntimeError: if the Frame carries no fibermap.
    """
    fr = read_frame(filename)
    if fr.fibermap is None:
        raise RuntimeError("reading Frame files into Spectra only supported if a fibermap exists")

    fmap = np.asarray(fr.fibermap.copy())

    # Stamp NIGHT / EXPID (and TILEID when the Frame header has one) onto
    # every fibermap row, since a Frame file does not carry these columns.
    if 'TILEID' in fr.meta:
        fmap = add_columns(fmap,
                           ['NIGHT', 'EXPID', 'TILEID'],
                           [np.int32(night), np.int32(expid), np.int32(fr.meta['TILEID'])],
                           )
    else:
        fmap = add_columns(fmap,
                           ['NIGHT', 'EXPID'],
                           [np.int32(night), np.int32(expid)],
                           )

    fmap = encode_table(fmap)

    bands = [band]

    # Optional per-band data: only present when the Frame carries them.
    mask = None
    if fr.mask is not None:
        mask = {band: fr.mask}

    res = None
    if fr.resolution_data is not None:
        res = {band: fr.resolution_data}

    extra = None
    if fr.chi2pix is not None:
        extra = {band: {"CHI2PIX": fr.chi2pix}}

    spec = Spectra(bands, {band: fr.wave}, {band: fr.flux}, {band: fr.ivar},
                   mask=mask, resolution_data=res, fibermap=fmap, meta=fr.meta,
                   extra=extra, single=single)

    return spec
|
en
| 0.860513
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst # -*- coding: utf-8 -*- lvmspec.io.spectra ===================== I/O routines for working with spectral grouping files. Write Spectra object to FITS file. This places the metadata into the header of the (empty) primary HDU. The first extension contains the fibermap, and then HDUs are created for the different data arrays for each band. Floating point data is converted to 32 bits before writing. Args: outfile (str): path to write spec (Spectra): the object containing the data units (str): optional string to use for the BUNIT key of the flux HDUs for each band. Returns: The absolute path to the file that was written. # Create the parent directory, if necessary. # Create HDUs from the data # metadata goes in empty primary HDU # Next is the fibermap # Add comments for fibermap columns. # Now append the data for all bands Read Spectra object from FITS file. This reads data written by the write_spectra function. A new Spectra object is instantiated and returned. Args: infile (str): path to read single (bool): if True, keep spectra as single precision in memory. Returns (Spectra): The object containing the data read from disk. # load the metadata. # initialize data objects # For efficiency, go through the HDUs in disk-order. Use the # extension name to determine where to put the data. We don't # explicitly copy the data, since that will be done when constructing # the Spectra object. # Find the band based on the name # this must be an "extra" HDU # Construct the Spectra object from the data. If there are any # inconsistencies in the sizes of the arrays read from the file, # they will be caught by the constructor. Read a FITS file containing a Frame and return a Spectra. A Frame file is very close to a Spectra object (by design), and only differs by missing the NIGHT and EXPID in the fibermap, as well as containing only one band of data. 
Args: infile (str): path to read night (int): the night value to use for all rows of the fibermap. expid (int): the expid value to use for all rows of the fibermap. band (str): the name of this band. single (bool): if True, keep spectra as single precision in memory. Returns (Spectra): The object containing the data read from disk.
| 2.229407
| 2
|
vendor/packages/translate-toolkit/translate/lang/test_identify.py
|
DESHRAJ/fjord
| 0
|
6625613
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from pytest import raises
from translate.lang.identify import LanguageIdentifier
from translate.storage.base import TranslationUnit
TEXT = """
Ästhetik des "Erhabenen" herangezogen.
kostete (hinzu kommen über 6 630 tote
O3 steht für Ozon; es wird in der
NO2 sind wesentlich am "sauren Regen"
Ethik hängt eng mit einer Dauerkrise der
Serumwerk GmbH Dresden, Postfach
,Hundeschläger' für die Dezimierung der
Momente ihrer Erfahrung".
zusammen.
ihren Kampf um Boden wie um
Unsinn, weil die Leute Unsinn wollen
Ressourcen als soziales Entwicklungsproblem".
der Leunabrücke durch Kommune,
Speiseröhre oder bei Atemstörungen von
hob er hervor, daß die Knorpel
"Reisekader" wurden zu DDR-Zeiten
für die soziale Verständigung zugesprochen
hinaus noch viele Fähigkeiten entwickelte.
Adorno).
Frankfurter Vereine. Und
die erste evangelische Schule hatte
beispielsweise die Pfarrkirche, das
gebracht. Offenbar spielt die Schlafposition
Menschlichkeit oder Rechtsstaatlichkeit
Die nun geplante Straße würde im
zum Thema "Der psychisch kranke
ergaben im Zeitraum von 1986 bis 1989
junge Leute sind oft zahlungskräftig in
unter die Bettdecke gerate, könne es sich
Schäden hinterläßt, berechtigt
körperlichen Belastbarkeit. Tatsächlich
von der Drogenpolitik zu reden) oder
Parlament unter syrischem Druck diesen
Jahrhunderten der wuchtige Turm für
auf die Frage aus, wie sich Eltern verhalten
ehemalige Generalsekretär der Partei,
Mark erhöht", sagt Hühsam, "das war bei
Über eine Annonce in einem Frankfurter
der Töpfer ein. Anhand von gefundenen
gut kennt, hatte ihm die wahren Tatsachen
Sechzehn Adorno-Schüler erinnern
und daß ein Weiterdenken der Theorie
für ihre Festlegung sind drei Jahre
Erschütterung Einblick in die Abhängigkeit
der Bauarbeiten sei erst im Laufe des
als neuen Kometen am Kandidatenhimmel
ergaben im Zeitraum von 1986 bis 1989
- ein neuer Beitrag zur Fortschreibung
Triptychon im Sitzungssaal des Ortsbeirates
Karin gab später ein bemerkenswertes
mit dem er darüber reden konnte?
Kunstwerk niemals das Ganze (der Welt
junge Talente vor, die vielleicht irgendwo
der AG Schweizer Straße, einer Initiative
für Stickstoffdioxid; diese Substanzen
Tätigkeit in erster Linie das sportliche
kommentiert worden, sowohl skeptisch
auch durch "eine Unmenge Zuschriften
Grundschule, in deren Gebäude auch die
gegen Streß und die sexuelle Attraktivität.
<NAME> und <NAME> aus
besteht für die Leunabrücke keine rechtliche
auf einem Parteikongreß mittels Abstimmung
Laurentiuskirche.
später der SED beitraten?" Es ist der
- und die Leute wollen Unsinn, weil
früh geboren wurden oder an Muskelschwäche
Grundlage. "Bei einem Brückenbau
Mensch" auf, als ein automatisch flirtender.
und sich inzwischen als Operateur
xx = Schadstoff wird dort nicht
sondern auch für die Geschäftsleute.
Kommunismus eintreten würde. In ihren
NCV-Vorsitzender <NAME> ehrte
Aufsicht bereit sind. *leo
Daseins, in dem sie Möglichkeiten einer
die alten Schwanheimer?" in der
können sich ein Lachen nicht verkneifen.
ist". Die "gesunde Mischung" aus edlen
genannt hatte: "<NAME>,
ist vorbeugend schon 1936 von Adorno
Ruhe ein", sagt <NAME> vor den
Ökologie bald auch
englischen Rasen der Nachbarn. Schon
Forschungsarbeit sich doch noch hat habilitieren
dringend davor, Säuglinge in den ersten
Milligramm je Kubikmeter
Im Gespräch: Indianer aus Kolumbien
wenige Fälle von Plötzlichem Kindstod.
Für nicht empfehlenswert hält er Fußball
SO2 steht für Schwefeldioxid, NO2
Schwanheimer Unterfeldes hin. Rund 110
Adorno 1957 auf eine törichte Dissonanz-Rezension
durch Laute, Lächeln und Greifen
und kamen, um abzustimmen." Doch
daß genau das nach dem Ende des
Zedillo, erst vor kurzem ins Erziehungsministerium
"andere Geschichte", die "unlogische".
Übungen zu integrieren und somit wenigstens
Ausmaße angenommen. Überall wimmelte
ambulant - einen Namen gemacht hat.
Kiesgruben im Unterfeld als
der in der Verfassung festgeschriebenen
Seit 1975 habe er in seinem Fachgebiet
Feuilletons eingerissene Methode, durch
ganz woanders, schon damals
mehr zu machen." Heute verkauft dort
für das existentielle Bewußtsein belegen.
überhöht und verklärt als durchdringt
Tatsächlich hat sich die durchschnittliche
"sehr, sehr schwer". Alle aber entwikkelten
der Bauchlage aufgeklärt wurde, ging der
- und die Leute wollen Unsinn, weil
in der einen Hand das Frühstücksbrötchen,
besitzen. Solche Sportarten
mit einer Aktion zusammen,
nach Bornheim wanderten, um ihren
sind, an den Ausführungsbestimmungen.
Um eventuelle Entsorgungskosten zu
junge Leute sind oft zahlungskräftig in
Zwar versicherte der syrische Vizepräsident
einem internen Korrektiv der Ethik.
Eckpfeiler der einstigen Stadtbefestigung
durchstieß er, als er den Arm auf die
hat es ihm nachgemacht. Und auch
nachgedacht, wie sein Leben wohl verlaufen
wie hoffnungsvoll: "Wie die Toten wehrlos
und ging besonders auf das Handwerk
Syrien.
<NAME>
Brüche glättete, steht er zukünftig
und erschüttert, wird Ästhetik zu
Fitneß-Studio individuell abgestimmte
der Strenge des Bilderverbots.
Carneval-Vereins (NCV) beim traditionellen
bringen, ohne sie dem Diktat versöhnender
und in den Karnevalvereinen -
"""
TEXT_LIST = [u"""
Ästhetik des "Erhabenen" herangezogen.
kostete (hinzu kommen über 6 630 tote""",
u"""O3 steht für Ozon; es wird in der
NO2 sind wesentlich am "sauren Regen"
Ethik hängt eng mit einer Dauerkrise der""",
u"""Serumwerk GmbH Dresden, Postfach
,Hundeschläger' für die Dezimierung der
Momente ihrer Erfahrung".
zusammen.
"""]
class TestLanguageIdentifier(object):
    """Tests for translate.lang.identify.LanguageIdentifier."""

    def setup_class(cls):
        # pytest passes the class object to setup_class, so name it cls.
        cls.langident = LanguageIdentifier()

    def test_identify_lang(self):
        # Empty input cannot be classified; the German corpus text must be
        # detected as 'de'.  Use "is None" for the None comparison (PEP 8).
        assert self.langident.identify_lang('') is None
        assert self.langident.identify_lang(TEXT) == 'de'

    def test_identify_store(self):
        # Source detection works on untranslated units; target detection
        # requires the targets to be filled in first.
        langlist = [TranslationUnit(string) for string in TEXT_LIST]
        assert self.langident.identify_source_lang(langlist) == 'de'
        for i, unit in enumerate(langlist):
            unit.target = TEXT_LIST[i]
        assert self.langident.identify_target_lang(langlist) == 'de'

    def test_bad_init_data(self):
        """Test __init__ with bad conf files and data dirs"""
        assert raises(ValueError, LanguageIdentifier, model_dir='missing')
        assert raises(ValueError, LanguageIdentifier, conf_file='missing')
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from pytest import raises
from translate.lang.identify import LanguageIdentifier
from translate.storage.base import TranslationUnit
TEXT = """
Ästhetik des "Erhabenen" herangezogen.
kostete (hinzu kommen über 6 630 tote
O3 steht für Ozon; es wird in der
NO2 sind wesentlich am "sauren Regen"
Ethik hängt eng mit einer Dauerkrise der
Serumwerk GmbH Dresden, Postfach
,Hundeschläger' für die Dezimierung der
Momente ihrer Erfahrung".
zusammen.
ihren Kampf um Boden wie um
Unsinn, weil die Leute Unsinn wollen
Ressourcen als soziales Entwicklungsproblem".
der Leunabrücke durch Kommune,
Speiseröhre oder bei Atemstörungen von
hob er hervor, daß die Knorpel
"Reisekader" wurden zu DDR-Zeiten
für die soziale Verständigung zugesprochen
hinaus noch viele Fähigkeiten entwickelte.
Adorno).
Frankfurter Vereine. Und
die erste evangelische Schule hatte
beispielsweise die Pfarrkirche, das
gebracht. Offenbar spielt die Schlafposition
Menschlichkeit oder Rechtsstaatlichkeit
Die nun geplante Straße würde im
zum Thema "Der psychisch kranke
ergaben im Zeitraum von 1986 bis 1989
junge Leute sind oft zahlungskräftig in
unter die Bettdecke gerate, könne es sich
Schäden hinterläßt, berechtigt
körperlichen Belastbarkeit. Tatsächlich
von der Drogenpolitik zu reden) oder
Parlament unter syrischem Druck diesen
Jahrhunderten der wuchtige Turm für
auf die Frage aus, wie sich Eltern verhalten
ehemalige Generalsekretär der Partei,
Mark erhöht", sagt Hühsam, "das war bei
Über eine Annonce in einem Frankfurter
der Töpfer ein. Anhand von gefundenen
gut kennt, hatte ihm die wahren Tatsachen
Sechzehn Adorno-Schüler erinnern
und daß ein Weiterdenken der Theorie
für ihre Festlegung sind drei Jahre
Erschütterung Einblick in die Abhängigkeit
der Bauarbeiten sei erst im Laufe des
als neuen Kometen am Kandidatenhimmel
ergaben im Zeitraum von 1986 bis 1989
- ein neuer Beitrag zur Fortschreibung
Triptychon im Sitzungssaal des Ortsbeirates
Karin gab später ein bemerkenswertes
mit dem er darüber reden konnte?
Kunstwerk niemals das Ganze (der Welt
junge Talente vor, die vielleicht irgendwo
der AG Schweizer Straße, einer Initiative
für Stickstoffdioxid; diese Substanzen
Tätigkeit in erster Linie das sportliche
kommentiert worden, sowohl skeptisch
auch durch "eine Unmenge Zuschriften
Grundschule, in deren Gebäude auch die
gegen Streß und die sexuelle Attraktivität.
<NAME> und <NAME> aus
besteht für die Leunabrücke keine rechtliche
auf einem Parteikongreß mittels Abstimmung
Laurentiuskirche.
später der SED beitraten?" Es ist der
- und die Leute wollen Unsinn, weil
früh geboren wurden oder an Muskelschwäche
Grundlage. "Bei einem Brückenbau
Mensch" auf, als ein automatisch flirtender.
und sich inzwischen als Operateur
xx = Schadstoff wird dort nicht
sondern auch für die Geschäftsleute.
Kommunismus eintreten würde. In ihren
NCV-Vorsitzender <NAME> ehrte
Aufsicht bereit sind. *leo
Daseins, in dem sie Möglichkeiten einer
die alten Schwanheimer?" in der
können sich ein Lachen nicht verkneifen.
ist". Die "gesunde Mischung" aus edlen
genannt hatte: "<NAME>,
ist vorbeugend schon 1936 von Adorno
Ruhe ein", sagt <NAME> vor den
Ökologie bald auch
englischen Rasen der Nachbarn. Schon
Forschungsarbeit sich doch noch hat habilitieren
dringend davor, Säuglinge in den ersten
Milligramm je Kubikmeter
Im Gespräch: Indianer aus Kolumbien
wenige Fälle von Plötzlichem Kindstod.
Für nicht empfehlenswert hält er Fußball
SO2 steht für Schwefeldioxid, NO2
Schwanheimer Unterfeldes hin. Rund 110
Adorno 1957 auf eine törichte Dissonanz-Rezension
durch Laute, Lächeln und Greifen
und kamen, um abzustimmen." Doch
daß genau das nach dem Ende des
Zedillo, erst vor kurzem ins Erziehungsministerium
"andere Geschichte", die "unlogische".
Übungen zu integrieren und somit wenigstens
Ausmaße angenommen. Überall wimmelte
ambulant - einen Namen gemacht hat.
Kiesgruben im Unterfeld als
der in der Verfassung festgeschriebenen
Seit 1975 habe er in seinem Fachgebiet
Feuilletons eingerissene Methode, durch
ganz woanders, schon damals
mehr zu machen." Heute verkauft dort
für das existentielle Bewußtsein belegen.
überhöht und verklärt als durchdringt
Tatsächlich hat sich die durchschnittliche
"sehr, sehr schwer". Alle aber entwikkelten
der Bauchlage aufgeklärt wurde, ging der
- und die Leute wollen Unsinn, weil
in der einen Hand das Frühstücksbrötchen,
besitzen. Solche Sportarten
mit einer Aktion zusammen,
nach Bornheim wanderten, um ihren
sind, an den Ausführungsbestimmungen.
Um eventuelle Entsorgungskosten zu
junge Leute sind oft zahlungskräftig in
Zwar versicherte der syrische Vizepräsident
einem internen Korrektiv der Ethik.
Eckpfeiler der einstigen Stadtbefestigung
durchstieß er, als er den Arm auf die
hat es ihm nachgemacht. Und auch
nachgedacht, wie sein Leben wohl verlaufen
wie hoffnungsvoll: "Wie die Toten wehrlos
und ging besonders auf das Handwerk
Syrien.
<NAME>
Brüche glättete, steht er zukünftig
und erschüttert, wird Ästhetik zu
Fitneß-Studio individuell abgestimmte
der Strenge des Bilderverbots.
Carneval-Vereins (NCV) beim traditionellen
bringen, ohne sie dem Diktat versöhnender
und in den Karnevalvereinen -
"""
TEXT_LIST = [u"""
Ästhetik des "Erhabenen" herangezogen.
kostete (hinzu kommen über 6 630 tote""",
u"""O3 steht für Ozon; es wird in der
NO2 sind wesentlich am "sauren Regen"
Ethik hängt eng mit einer Dauerkrise der""",
u"""Serumwerk GmbH Dresden, Postfach
,Hundeschläger' für die Dezimierung der
Momente ihrer Erfahrung".
zusammen.
"""]
class TestLanguageIdentifier(object):
    """Tests for translate.lang.identify.LanguageIdentifier."""

    def setup_class(cls):
        # pytest passes the class object to setup_class, so name it cls.
        cls.langident = LanguageIdentifier()

    def test_identify_lang(self):
        # Empty input cannot be classified; the German corpus text must be
        # detected as 'de'.  Use "is None" for the None comparison (PEP 8).
        assert self.langident.identify_lang('') is None
        assert self.langident.identify_lang(TEXT) == 'de'

    def test_identify_store(self):
        # Source detection works on untranslated units; target detection
        # requires the targets to be filled in first.
        langlist = [TranslationUnit(string) for string in TEXT_LIST]
        assert self.langident.identify_source_lang(langlist) == 'de'
        for i, unit in enumerate(langlist):
            unit.target = TEXT_LIST[i]
        assert self.langident.identify_target_lang(langlist) == 'de'

    def test_bad_init_data(self):
        """Test __init__ with bad conf files and data dirs"""
        assert raises(ValueError, LanguageIdentifier, model_dir='missing')
        assert raises(ValueError, LanguageIdentifier, conf_file='missing')
|
de
| 0.995448
|
#!/usr/bin/env python # -*- coding: UTF-8 -*- Ästhetik des "Erhabenen" herangezogen. kostete (hinzu kommen über 6 630 tote O3 steht für Ozon; es wird in der NO2 sind wesentlich am "sauren Regen" Ethik hängt eng mit einer Dauerkrise der Serumwerk GmbH Dresden, Postfach ,Hundeschläger' für die Dezimierung der Momente ihrer Erfahrung". zusammen. ihren Kampf um Boden wie um Unsinn, weil die Leute Unsinn wollen Ressourcen als soziales Entwicklungsproblem". der Leunabrücke durch Kommune, Speiseröhre oder bei Atemstörungen von hob er hervor, daß die Knorpel "Reisekader" wurden zu DDR-Zeiten für die soziale Verständigung zugesprochen hinaus noch viele Fähigkeiten entwickelte. Adorno). Frankfurter Vereine. Und die erste evangelische Schule hatte beispielsweise die Pfarrkirche, das gebracht. Offenbar spielt die Schlafposition Menschlichkeit oder Rechtsstaatlichkeit Die nun geplante Straße würde im zum Thema "Der psychisch kranke ergaben im Zeitraum von 1986 bis 1989 junge Leute sind oft zahlungskräftig in unter die Bettdecke gerate, könne es sich Schäden hinterläßt, berechtigt körperlichen Belastbarkeit. Tatsächlich von der Drogenpolitik zu reden) oder Parlament unter syrischem Druck diesen Jahrhunderten der wuchtige Turm für auf die Frage aus, wie sich Eltern verhalten ehemalige Generalsekretär der Partei, Mark erhöht", sagt Hühsam, "das war bei Über eine Annonce in einem Frankfurter der Töpfer ein. Anhand von gefundenen gut kennt, hatte ihm die wahren Tatsachen Sechzehn Adorno-Schüler erinnern und daß ein Weiterdenken der Theorie für ihre Festlegung sind drei Jahre Erschütterung Einblick in die Abhängigkeit der Bauarbeiten sei erst im Laufe des als neuen Kometen am Kandidatenhimmel ergaben im Zeitraum von 1986 bis 1989 - ein neuer Beitrag zur Fortschreibung Triptychon im Sitzungssaal des Ortsbeirates Karin gab später ein bemerkenswertes mit dem er darüber reden konnte? 
Kunstwerk niemals das Ganze (der Welt junge Talente vor, die vielleicht irgendwo der AG Schweizer Straße, einer Initiative für Stickstoffdioxid; diese Substanzen Tätigkeit in erster Linie das sportliche kommentiert worden, sowohl skeptisch auch durch "eine Unmenge Zuschriften Grundschule, in deren Gebäude auch die gegen Streß und die sexuelle Attraktivität. <NAME> und <NAME> aus besteht für die Leunabrücke keine rechtliche auf einem Parteikongreß mittels Abstimmung Laurentiuskirche. später der SED beitraten?" Es ist der - und die Leute wollen Unsinn, weil früh geboren wurden oder an Muskelschwäche Grundlage. "Bei einem Brückenbau Mensch" auf, als ein automatisch flirtender. und sich inzwischen als Operateur xx = Schadstoff wird dort nicht sondern auch für die Geschäftsleute. Kommunismus eintreten würde. In ihren NCV-Vorsitzender <NAME> ehrte Aufsicht bereit sind. *leo Daseins, in dem sie Möglichkeiten einer die alten Schwanheimer?" in der können sich ein Lachen nicht verkneifen. ist". Die "gesunde Mischung" aus edlen genannt hatte: "<NAME>, ist vorbeugend schon 1936 von Adorno Ruhe ein", sagt <NAME> vor den Ökologie bald auch englischen Rasen der Nachbarn. Schon Forschungsarbeit sich doch noch hat habilitieren dringend davor, Säuglinge in den ersten Milligramm je Kubikmeter Im Gespräch: Indianer aus Kolumbien wenige Fälle von Plötzlichem Kindstod. Für nicht empfehlenswert hält er Fußball SO2 steht für Schwefeldioxid, NO2 Schwanheimer Unterfeldes hin. Rund 110 Adorno 1957 auf eine törichte Dissonanz-Rezension durch Laute, Lächeln und Greifen und kamen, um abzustimmen." Doch daß genau das nach dem Ende des Zedillo, erst vor kurzem ins Erziehungsministerium "andere Geschichte", die "unlogische". Übungen zu integrieren und somit wenigstens Ausmaße angenommen. Überall wimmelte ambulant - einen Namen gemacht hat. 
Kiesgruben im Unterfeld als der in der Verfassung festgeschriebenen Seit 1975 habe er in seinem Fachgebiet Feuilletons eingerissene Methode, durch ganz woanders, schon damals mehr zu machen." Heute verkauft dort für das existentielle Bewußtsein belegen. überhöht und verklärt als durchdringt Tatsächlich hat sich die durchschnittliche "sehr, sehr schwer". Alle aber entwikkelten der Bauchlage aufgeklärt wurde, ging der - und die Leute wollen Unsinn, weil in der einen Hand das Frühstücksbrötchen, besitzen. Solche Sportarten mit einer Aktion zusammen, nach Bornheim wanderten, um ihren sind, an den Ausführungsbestimmungen. Um eventuelle Entsorgungskosten zu junge Leute sind oft zahlungskräftig in Zwar versicherte der syrische Vizepräsident einem internen Korrektiv der Ethik. Eckpfeiler der einstigen Stadtbefestigung durchstieß er, als er den Arm auf die hat es ihm nachgemacht. Und auch nachgedacht, wie sein Leben wohl verlaufen wie hoffnungsvoll: "Wie die Toten wehrlos und ging besonders auf das Handwerk Syrien. <NAME> Brüche glättete, steht er zukünftig und erschüttert, wird Ästhetik zu Fitneß-Studio individuell abgestimmte der Strenge des Bilderverbots. Carneval-Vereins (NCV) beim traditionellen bringen, ohne sie dem Diktat versöhnender und in den Karnevalvereinen - Ästhetik des "Erhabenen" herangezogen. kostete (hinzu kommen über 6 630 tote O3 steht für Ozon; es wird in der NO2 sind wesentlich am "sauren Regen" Ethik hängt eng mit einer Dauerkrise der Serumwerk GmbH Dresden, Postfach ,Hundeschläger' für die Dezimierung der Momente ihrer Erfahrung". zusammen. Test __init__ with bad conf files and data dirs
| 2.065322
| 2
|
par_gcp_phys_barebone.py
|
Suhaibinator/CarlaExperiments
| 0
|
6625614
|
#!/usr/bin/env python
"""
Barebone Simulation without camera or extraneous sensors.
"""
from __future__ import print_function
# ==============================================================================
# -- find carla module ---------------------------------------------------------
# ==============================================================================
import torch
import sys

# Make the carla egg importable when carla is not installed system-wide.
# Catch only ImportError: the original bare "except:" would also have
# swallowed unrelated failures (e.g. KeyboardInterrupt).
try:
    import carla
except ImportError:
    # Known egg locations on the GCP server and the local dev machine.
    sys.path.append('/home/suhaib_abdulquddos/carla_files/PythonAPI/carla/dist/carla-0.9.8-py3.5-linux-x86_64.egg')
    sys.path.append('/home/suhaib/Desktop/GRA/carla2/PythonAPI/carla/dist/carla-0.9.8-py3.5-linux-x86_64.egg')
    import carla
# ==============================================================================
# -- imports -------------------------------------------------------------------
# ==============================================================================
from carla import ColorConverter as cc
import argparse
import collections
import datetime
import logging
import math
import random
import re
import weakref
from regression4 import get_distance
# ==============================================================================
# -- Global functions ----------------------------------------------------------
# ==============================================================================
def find_weather_presets():
    """Return (carla.WeatherParameters preset, display name) pairs.

    Preset attribute names are CamelCase; the display name is the same
    identifier split into space-separated words.
    """
    camel_split = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')

    def pretty(identifier):
        # Join the CamelCase fragments with spaces.
        return ' '.join(m.group(0) for m in camel_split.finditer(identifier))

    result = []
    for attr in dir(carla.WeatherParameters):
        # Presets are the capitalized attributes of WeatherParameters.
        if re.match('[A-Z].+', attr):
            result.append((getattr(carla.WeatherParameters, attr), pretty(attr)))
    return result
def get_actor_display_name(actor, truncate=250):
    """Return a human-readable name derived from actor.type_id.

    Drops the leading category segment (e.g. "vehicle"), title-cases the
    rest, and truncates to *truncate* characters with a trailing ellipsis.
    """
    segments = actor.type_id.replace('_', '.').title().split('.')
    display = ' '.join(segments[1:])
    if len(display) > truncate:
        return display[:truncate - 1] + u'\u2026'
    return display
# ==============================================================================
# -- World ---------------------------------------------------------------------
# ==============================================================================
class World(object):
def __init__(self, carla_world, actor_filter, phys_settings, actor_role_name='hero'):
self.world = carla_world
self.actor_role_name = actor_role_name
self.map = self.world.get_map()
self.player = None
self.speed = 60
self.collision_sensor = None
self.lane_invasion_sensor = None
self.gnss_sensor = None
self.camera_manager = None
self._weather_presets = find_weather_presets()
self._weather_index = 0
self._actor_filter = actor_filter
self.restart()
if phys_settings is not None:
self.apply_physics(phys_settings)
self.recording_enabled = False
self.recording_start = 0
self.f0 = 0
self.f1 = 0
def restart(self):
# Get a random blueprint.
blueprint = random.choice(self.world.get_blueprint_library().filter(self._actor_filter))
blueprint.set_attribute('role_name', self.actor_role_name)
if blueprint.has_attribute('color'):
color = random.choice(blueprint.get_attribute('color').recommended_values)
blueprint.set_attribute('color', color)
while self.player is None:
new_transform = carla.Transform(carla.Location(x=229.8, y=81.1, z=1), carla.Rotation(pitch=0, yaw=92.0042, roll=0))
self.world.get_spectator().set_transform(new_transform)
self.player = self.world.try_spawn_actor(blueprint, new_transform) # Set up the sensors.
#self.player.set_autopilot(True)
def apply_physics(self, phys_settings):
    """Apply evolved physics parameters from `phys_settings` to the player.

    Keys read: 'flwf'/'frwf'/'rlwf'/'rrwf' (per-wheel tire friction),
    'flwmsa'/'frwmsa' (front-wheel max steer angle), 'mass',
    'steer1'..'steer3' (steering-curve values), 'torque1' (torque-curve
    value) and 'speed' (stored on the world for later use).
    """
    # Advance the simulation one step before touching physics — presumably
    # so the freshly spawned player is fully registered (TODO confirm).
    self.world.tick()
    """
    Default wheel settings:
    front_left_wheel = carla.WheelPhysicsControl(tire_friction=3.5, damping_rate=0.25, max_steer_angle=70, radius=36.7)
    front_right_wheel = carla.WheelPhysicsControl(tire_friction=3.5, damping_rate=0.25, max_steer_angle=70, radius=36.7)
    rear_left_wheel = carla.WheelPhysicsControl(tire_friction=3.5, damping_rate=0.25, max_steer_angle=0.0, radius=36.0)
    rear_right_wheel = carla.WheelPhysicsControl(tire_friction=3.5, damping_rate=0.25, max_steer_angle=0.0, radius=36.0)
    """
    # Per-wheel physics: friction / max steer angle come from the search
    # settings; rear wheels do not steer.
    front_left_wheel = carla.WheelPhysicsControl(tire_friction=phys_settings['flwf'], damping_rate=0.25, max_steer_angle=phys_settings['flwmsa'], radius=36.7)
    front_right_wheel = carla.WheelPhysicsControl(tire_friction=phys_settings['frwf'], damping_rate=0.25, max_steer_angle=phys_settings['frwmsa'], radius=36.7)
    rear_left_wheel = carla.WheelPhysicsControl(tire_friction=phys_settings['rlwf'], damping_rate=0.25, max_steer_angle=0.0, radius=36.0)
    rear_right_wheel = carla.WheelPhysicsControl(tire_friction=phys_settings['rrwf'], damping_rate=0.25, max_steer_angle=0.0, radius=36.0)
    wheels = [front_left_wheel, front_right_wheel, rear_left_wheel, rear_right_wheel]
    # Change Vehicle Physics Control parameters of the vehicle
    physics_control = self.player.get_physics_control()
    #physics_control.max_rpm = phys_settings['max_rpm']
    physics_control.mass = phys_settings['mass']
    #physics_control.drag_coefficient = phys_settings['drag_coefficient']
    physics_control.wheels = wheels
    # Steering / torque curves: fixed endpoints, evolved interior points.
    physics_control.steering_curve = [carla.Vector2D(0, 1), carla.Vector2D(20, phys_settings['steer1']), carla.Vector2D(60, phys_settings['steer2']), carla.Vector2D(120, phys_settings['steer3'])]
    physics_control.torque_curve = [carla.Vector2D(0, 400), carla.Vector2D(890, phys_settings['torque1']), carla.Vector2D(5729.577, 400)]
    self.speed = phys_settings['speed']
    # Apply Vehicle Physics Control for the vehicle
    self.player.apply_physics_control(physics_control)
    #rot = self.player.get_transform().rotation.yaw
    #self.player.set_velocity(carla.Vector3D(self.speed*math.cos(rot*math.pi/180)/3.6, self.speed*math.sin(rot*math.pi/180)/3.6, 0))
def destroy_sensors(self):
    """Destroy the camera sensor and clear the camera manager's slots."""
    manager = self.camera_manager
    manager.sensor.destroy()
    manager.sensor = None
    manager.index = None
def destroy(self):
    """Destroy every actor owned by this world (currently just the player)."""
    for actor in (self.player,):
        if actor is not None:
            actor.destroy()
def eval_reward(self):
    """Accumulate the first objective f0 for the current frame.

    Adds a flat penalty of 10 whenever the car's y-position is below 81 or
    its speed is under 5 km/h, plus the absolute displacement returned by
    regression4.get_distance for the current position.
    """
    current_loc = self.player.get_transform().location
    v = self.player.get_velocity()
    # Speed in km/h from the m/s velocity vector.
    speed = 3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)
    # Flat penalty while y < 81 or moving slower than 5 km/h.
    self.f0 += 10 if current_loc.y < 81 or speed < 5.0 else 0
    # Deviation penalty from the reference path (see regression4.get_distance).
    self.f0 += abs(get_distance(current_loc.x, current_loc.y))
    #print("F0 val: " + str(self.f0))
def eval_target_distance_reward(self):
    """Return 10x the Euclidean distance from the player to the point (25, 193.7)."""
    pos = self.player.get_location()
    dx = pos.x - 25
    dy = pos.y - 193.7
    return 10 * math.sqrt(dx ** 2 + dy ** 2)
# ==============================================================================
# -- KeyboardControl -----------------------------------------------------------
# ==============================================================================
class KeyboardControl(object):
    """Drives the ego vehicle with a neural-network policy.

    Despite the name (apparently inherited from the CARLA manual-control
    example), no keyboard input is read: steering and throttle come from
    the supplied torch network.
    """
    def __init__(self, world, start_in_autopilot, net, scaler):
        # Policy network: maps 6 state features -> (steer, throttle).
        self._net = net
        # NOTE(review): scaler is stored but never used in this class.
        self._scaler = scaler
        self.world = world
        self._autopilot_enabled = start_in_autopilot
        if isinstance(world.player, carla.Vehicle):
            self._control = carla.VehicleControl()
            #world.player.set_autopilot(self._autopilot_enabled)
        elif isinstance(world.player, carla.Walker):
            self._control = carla.WalkerControl()
            # Walkers have no autopilot; track rotation instead.
            self._autopilot_enabled = False
            self._rotation = world.player.get_transform().rotation
        else:
            raise NotImplementedError("Actor type not supported")
        self._steer_cache = 0.0
    def parse_events(self, client, world):
        """Run one control step.

        Returns 5 once the player is inside the elliptical goal region around
        (25, 193.7); otherwise queries the network and applies the resulting
        control, returning None. `client` is unused here.
        """
        current_transform = world.player.get_transform()
        pos = current_transform.location
        #print("Distance: " + str((pos.x-25)**2+(pos.y-193.7)**2))
        # Elliptical goal region, stretched along y by a factor of 6.
        if (pos.x-25)**2+((pos.y-193.7)/6)**2 < 20:
            return 5
        if not self._autopilot_enabled:
            if isinstance(self._control, carla.VehicleControl):
                v = world.player.get_velocity()
                current_transform = world.player.get_transform()
                pos = current_transform.location
                self.calculate_steering_throttle(v.x, v.y, pos.x, pos.y, current_transform.rotation.yaw, get_distance(pos.x, pos.y))
                world.player.apply_control(self._control)
    def calculate_steering_throttle(self, vel_x, vel_y, pos_x, pos_y, yaw, displacement):
        """Run the policy network on the 6-feature state and fill self._control.

        Steering is rounded to one decimal place; throttle is used as-is.
        """
        with torch.no_grad():
            chosen_action = self._net(torch.FloatTensor([vel_x, vel_y, pos_x, pos_y, yaw, displacement]))
            #print("Action: " + str(chosen_action))
            self._steer_cache = chosen_action[0].item()
            self._control.steer = round(self._steer_cache, 1)
            self._control.throttle = chosen_action[1].item()
# ==============================================================================
# -- game_loop() ---------------------------------------------------------------
# ==============================================================================
def game_loop(args, net, scaler, port, phys_settings):
    """Run one fixed-length episode in synchronous mode.

    Spawns the world and controller, simulates 20 seconds of game time at
    `timestep` resolution, and returns the objective pair (f0, f1) — early
    if the goal region is reached. Returns (None, None) if the controller
    aborts or no frame completed. `port` is unused (args.port is used) but
    kept for interface compatibility.
    """
    world = None
    client = None  # fix: guarded — was referenced in finally even if carla.Client() raised
    timestep = 0.1  # simulated seconds per tick
    f0 = f1 = None  # fix: defined up-front so the final return can never hit an unbound local
    try:
        client = carla.Client(args.host, args.port)
        client.set_timeout(20.0)
        print(args)
        #if (client.get_world().get_map().name != "Town03"):
        #    client.load_world('Town03')
        world = World(client.get_world(), args.filter, phys_settings, args.rolename)
        controller = KeyboardControl(world, args.autopilot, net, scaler)
        # Force synchronous mode with a fixed timestep so ticks are deterministic.
        settings = world.world.get_settings()
        if not settings.synchronous_mode or settings.fixed_delta_seconds != timestep:
            settings.synchronous_mode = True
            settings.fixed_delta_seconds = timestep
            world.world.apply_settings(settings)
        # 20 seconds of game time.
        for _ in range(math.ceil(20 / timestep)):
            world.world.tick()
            result = controller.parse_events(client, world)
            if result == 5:
                # Goal region reached: report the objectives immediately.
                return world.f0, world.eval_target_distance_reward()
            elif result:
                # fix: the original returned bare None here, crashing callers
                # that unpack the result; return a well-formed pair instead.
                return None, None
            world.eval_reward()
            f0 = world.f0
            f1 = world.eval_target_distance_reward()
    finally:
        if world is not None and world.recording_enabled and client is not None:
            client.stop_recorder()
        if world is not None:
            world.destroy()
    return f0, f1
# ==============================================================================
# -- main() --------------------------------------------------------------------
# ==============================================================================
def Game(neural_net, scaler, port, phys_settings):
    """Entry point: parse CLI arguments and run one simulation episode.

    Returns the (f0, f1) objective pair from game_loop, or (None, None)
    when the run is cancelled before producing a result.
    """
    argparser = argparse.ArgumentParser(
        description='CARLA Manual Control Client')
    argparser.add_argument(
        '-v', '--verbose',
        action='store_true',
        dest='debug',
        help='print debug information')
    argparser.add_argument(
        '--host',
        metavar='H',
        default='127.0.0.1',
        help='IP of the host server (default: 127.0.0.1)')
    argparser.add_argument(
        '-p', '--port',
        metavar='P',
        default=port,
        type=int,
        help='TCP port to listen to (default: 2000)')
    argparser.add_argument(
        '-a', '--autopilot',
        action='store_true',
        help='enable autopilot')
    argparser.add_argument(
        '--res',
        metavar='WIDTHxHEIGHT',
        default='1280x720',
        help='window resolution (default: 1280x720)')
    argparser.add_argument(
        '--filter',
        metavar='PATTERN',
        default='vehicle.tesla.model3',
        help='actor filter (default: "vehicle.tesla.model3")')
    argparser.add_argument(
        '--rolename',
        metavar='NAME',
        default='hero',
        help='actor role name (default: "hero")')
    args = argparser.parse_args()
    args.width, args.height = [int(x) for x in args.res.split('x')]
    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
    logging.info('listening to server %s:%s', args.host, args.port)
    print(__doc__)
    # fix: defined up-front — the original raised NameError on the return
    # below when interrupted before game_loop assigned f0/f1.
    f0 = f1 = None
    try:
        f0, f1 = game_loop(args, neural_net, scaler, port, phys_settings)
        # fix: removed a stray Python-2-style bare `print` statement (a no-op
        # expression in Python 3).
    except KeyboardInterrupt:
        print('\nCancelled by user. Bye!')
    return f0, f1
|
#!/usr/bin/env python
"""
Barebone Simulation without camera or extraneous sensors.
"""
from __future__ import print_function
# ==============================================================================
# -- find carla module ---------------------------------------------------------
# ==============================================================================
import torch
import sys
try:
import carla
except:
sys.path.append('/home/suhaib_abdulquddos/carla_files/PythonAPI/carla/dist/carla-0.9.8-py3.5-linux-x86_64.egg')
sys.path.append('/home/suhaib/Desktop/GRA/carla2/PythonAPI/carla/dist/carla-0.9.8-py3.5-linux-x86_64.egg')
import carla
# ==============================================================================
# -- imports -------------------------------------------------------------------
# ==============================================================================
from carla import ColorConverter as cc
import argparse
import collections
import datetime
import logging
import math
import random
import re
import weakref
from regression4 import get_distance
# ==============================================================================
# -- Global functions ----------------------------------------------------------
# ==============================================================================
def find_weather_presets():
    """Return [(carla.WeatherParameters preset, display name), ...].

    Preset attribute names such as 'ClearNoon' are split on CamelCase
    boundaries to build the display name ('Clear Noon').
    """
    # Matches maximal CamelCase fragments, e.g. 'ClearNoon' -> 'Clear', 'Noon'.
    rgx = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')
    name = lambda x: ' '.join(m.group(0) for m in rgx.finditer(x))
    # Preset attributes all start with an uppercase letter.
    presets = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)]
    return [(getattr(carla.WeatherParameters, x), name(x)) for x in presets]
def get_actor_display_name(actor, truncate=250):
    """Build a human-readable name from an actor's type_id, dropping the
    category prefix and truncating with an ellipsis when too long."""
    parts = actor.type_id.replace('_', '.').title().split('.')
    name = ' '.join(parts[1:])
    if len(name) > truncate:
        return name[:truncate - 1] + '\u2026'
    return name
# ==============================================================================
# -- World ---------------------------------------------------------------------
# ==============================================================================
class World(object):
    """Wrapper around a carla.World holding the ego vehicle ("player") and
    the two optimisation objectives f0/f1 accumulated during a run."""
    def __init__(self, carla_world, actor_filter, phys_settings, actor_role_name='hero'):
        self.world = carla_world
        self.actor_role_name = actor_role_name
        self.map = self.world.get_map()
        # Ego vehicle; spawned by restart() below.
        self.player = None
        # Target speed (km/h); may be overwritten by apply_physics().
        self.speed = 60
        # Sensor slots kept for interface parity with the full CARLA example;
        # none are created in this barebone version.
        self.collision_sensor = None
        self.lane_invasion_sensor = None
        self.gnss_sensor = None
        self.camera_manager = None
        self._weather_presets = find_weather_presets()
        self._weather_index = 0
        self._actor_filter = actor_filter
        self.restart()
        if phys_settings is not None:
            self.apply_physics(phys_settings)
        self.recording_enabled = False
        self.recording_start = 0
        # Objectives accumulated during the episode.
        self.f0 = 0
        self.f1 = 0
    def restart(self):
        """(Re)spawn the ego vehicle at a fixed start pose, retrying until
        try_spawn_actor succeeds."""
        # Get a random blueprint.
        blueprint = random.choice(self.world.get_blueprint_library().filter(self._actor_filter))
        blueprint.set_attribute('role_name', self.actor_role_name)
        if blueprint.has_attribute('color'):
            color = random.choice(blueprint.get_attribute('color').recommended_values)
            blueprint.set_attribute('color', color)
        # try_spawn_actor returns None when the spawn point is blocked.
        while self.player is None:
            new_transform = carla.Transform(carla.Location(x=229.8, y=81.1, z=1), carla.Rotation(pitch=0, yaw=92.0042, roll=0))
            self.world.get_spectator().set_transform(new_transform)
            self.player = self.world.try_spawn_actor(blueprint, new_transform) # Set up the sensors.
        #self.player.set_autopilot(True)
    def apply_physics(self, phys_settings):
        """Apply evolved physics parameters from `phys_settings` to the player.

        Keys read: 'flwf'/'frwf'/'rlwf'/'rrwf' (per-wheel tire friction),
        'flwmsa'/'frwmsa' (front max steer angle), 'mass', 'steer1'..'steer3',
        'torque1' and 'speed'.
        """
        # Tick once before touching physics — presumably so the freshly
        # spawned player is fully registered (TODO confirm).
        self.world.tick()
        """
        Default wheel settings:
        front_left_wheel = carla.WheelPhysicsControl(tire_friction=3.5, damping_rate=0.25, max_steer_angle=70, radius=36.7)
        front_right_wheel = carla.WheelPhysicsControl(tire_friction=3.5, damping_rate=0.25, max_steer_angle=70, radius=36.7)
        rear_left_wheel = carla.WheelPhysicsControl(tire_friction=3.5, damping_rate=0.25, max_steer_angle=0.0, radius=36.0)
        rear_right_wheel = carla.WheelPhysicsControl(tire_friction=3.5, damping_rate=0.25, max_steer_angle=0.0, radius=36.0)
        """
        # Per-wheel physics; rear wheels do not steer.
        front_left_wheel = carla.WheelPhysicsControl(tire_friction=phys_settings['flwf'], damping_rate=0.25, max_steer_angle=phys_settings['flwmsa'], radius=36.7)
        front_right_wheel = carla.WheelPhysicsControl(tire_friction=phys_settings['frwf'], damping_rate=0.25, max_steer_angle=phys_settings['frwmsa'], radius=36.7)
        rear_left_wheel = carla.WheelPhysicsControl(tire_friction=phys_settings['rlwf'], damping_rate=0.25, max_steer_angle=0.0, radius=36.0)
        rear_right_wheel = carla.WheelPhysicsControl(tire_friction=phys_settings['rrwf'], damping_rate=0.25, max_steer_angle=0.0, radius=36.0)
        wheels = [front_left_wheel, front_right_wheel, rear_left_wheel, rear_right_wheel]
        # Change Vehicle Physics Control parameters of the vehicle
        physics_control = self.player.get_physics_control()
        #physics_control.max_rpm = phys_settings['max_rpm']
        physics_control.mass = phys_settings['mass']
        #physics_control.drag_coefficient = phys_settings['drag_coefficient']
        physics_control.wheels = wheels
        # Steering / torque curves: fixed endpoints, evolved interior points.
        physics_control.steering_curve = [carla.Vector2D(0, 1), carla.Vector2D(20, phys_settings['steer1']), carla.Vector2D(60, phys_settings['steer2']), carla.Vector2D(120, phys_settings['steer3'])]
        physics_control.torque_curve = [carla.Vector2D(0, 400), carla.Vector2D(890, phys_settings['torque1']), carla.Vector2D(5729.577, 400)]
        self.speed = phys_settings['speed']
        # Apply Vehicle Physics Control for the vehicle
        self.player.apply_physics_control(physics_control)
        #rot = self.player.get_transform().rotation.yaw
        #self.player.set_velocity(carla.Vector3D(self.speed*math.cos(rot*math.pi/180)/3.6, self.speed*math.sin(rot*math.pi/180)/3.6, 0))
    def destroy_sensors(self):
        """Destroy the camera sensor and clear the camera manager's slots."""
        self.camera_manager.sensor.destroy()
        self.camera_manager.sensor = None
        self.camera_manager.index = None
    def destroy(self):
        """Destroy every actor owned by this world (currently just the player)."""
        actors = [
            self.player]
        for actor in actors:
            if actor is not None:
                actor.destroy()
    def eval_reward(self):
        """Accumulate the first objective f0 for the current frame: a flat
        penalty while y < 81 or speed < 5 km/h, plus the absolute path
        displacement from regression4.get_distance."""
        current_loc = self.player.get_transform().location
        v = self.player.get_velocity()
        # Speed in km/h from the m/s velocity vector.
        speed = 3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)
        self.f0 += 10 if current_loc.y < 81 or speed < 5.0 else 0
        self.f0 += abs(get_distance(current_loc.x, current_loc.y))
        #print("F0 val: " + str(self.f0))
    def eval_target_distance_reward(self):
        """Return 10x the Euclidean distance from the player to (25, 193.7)."""
        pos = self.player.get_location()
        return 10*math.sqrt((pos.x-25)**2+(pos.y-193.7)**2)
# ==============================================================================
# -- KeyboardControl -----------------------------------------------------------
# ==============================================================================
class KeyboardControl(object):
    """Drives the ego vehicle with a neural-network policy.

    Despite the name (apparently inherited from the CARLA manual-control
    example), no keyboard input is read: steering and throttle come from
    the supplied torch network.
    """
    def __init__(self, world, start_in_autopilot, net, scaler):
        # Policy network: maps 6 state features -> (steer, throttle).
        self._net = net
        # NOTE(review): scaler is stored but never used in this class.
        self._scaler = scaler
        self.world = world
        self._autopilot_enabled = start_in_autopilot
        if isinstance(world.player, carla.Vehicle):
            self._control = carla.VehicleControl()
            #world.player.set_autopilot(self._autopilot_enabled)
        elif isinstance(world.player, carla.Walker):
            self._control = carla.WalkerControl()
            # Walkers have no autopilot; track rotation instead.
            self._autopilot_enabled = False
            self._rotation = world.player.get_transform().rotation
        else:
            raise NotImplementedError("Actor type not supported")
        self._steer_cache = 0.0
    def parse_events(self, client, world):
        """Run one control step.

        Returns 5 once the player is inside the elliptical goal region around
        (25, 193.7); otherwise queries the network and applies the resulting
        control, returning None. `client` is unused here.
        """
        current_transform = world.player.get_transform()
        pos = current_transform.location
        #print("Distance: " + str((pos.x-25)**2+(pos.y-193.7)**2))
        # Elliptical goal region, stretched along y by a factor of 6.
        if (pos.x-25)**2+((pos.y-193.7)/6)**2 < 20:
            return 5
        if not self._autopilot_enabled:
            if isinstance(self._control, carla.VehicleControl):
                v = world.player.get_velocity()
                current_transform = world.player.get_transform()
                pos = current_transform.location
                self.calculate_steering_throttle(v.x, v.y, pos.x, pos.y, current_transform.rotation.yaw, get_distance(pos.x, pos.y))
                world.player.apply_control(self._control)
    def calculate_steering_throttle(self, vel_x, vel_y, pos_x, pos_y, yaw, displacement):
        """Run the policy network on the 6-feature state and fill self._control.

        Steering is rounded to one decimal place; throttle is used as-is.
        """
        with torch.no_grad():
            chosen_action = self._net(torch.FloatTensor([vel_x, vel_y, pos_x, pos_y, yaw, displacement]))
            #print("Action: " + str(chosen_action))
            self._steer_cache = chosen_action[0].item()
            self._control.steer = round(self._steer_cache, 1)
            self._control.throttle = chosen_action[1].item()
# ==============================================================================
# -- game_loop() ---------------------------------------------------------------
# ==============================================================================
def game_loop(args, net, scaler, port, phys_settings):
    """Run one fixed-length episode in synchronous mode.

    Spawns the world and controller, simulates 20 seconds of game time at
    `timestep` resolution, and returns the objective pair (f0, f1) — early
    if the goal region is reached. Returns (None, None) if the controller
    aborts or no frame completed. `port` is unused (args.port is used) but
    kept for interface compatibility.
    """
    world = None
    client = None  # fix: guarded — was referenced in finally even if carla.Client() raised
    timestep = 0.1  # simulated seconds per tick
    f0 = f1 = None  # fix: defined up-front so the final return can never hit an unbound local
    try:
        client = carla.Client(args.host, args.port)
        client.set_timeout(20.0)
        print(args)
        #if (client.get_world().get_map().name != "Town03"):
        #    client.load_world('Town03')
        world = World(client.get_world(), args.filter, phys_settings, args.rolename)
        controller = KeyboardControl(world, args.autopilot, net, scaler)
        # Force synchronous mode with a fixed timestep so ticks are deterministic.
        settings = world.world.get_settings()
        if not settings.synchronous_mode or settings.fixed_delta_seconds != timestep:
            settings.synchronous_mode = True
            settings.fixed_delta_seconds = timestep
            world.world.apply_settings(settings)
        # 20 seconds of game time.
        for _ in range(math.ceil(20 / timestep)):
            world.world.tick()
            result = controller.parse_events(client, world)
            if result == 5:
                # Goal region reached: report the objectives immediately.
                return world.f0, world.eval_target_distance_reward()
            elif result:
                # fix: the original returned bare None here, crashing callers
                # that unpack the result; return a well-formed pair instead.
                return None, None
            world.eval_reward()
            f0 = world.f0
            f1 = world.eval_target_distance_reward()
    finally:
        if world is not None and world.recording_enabled and client is not None:
            client.stop_recorder()
        if world is not None:
            world.destroy()
    return f0, f1
# ==============================================================================
# -- main() --------------------------------------------------------------------
# ==============================================================================
def Game(neural_net, scaler, port, phys_settings):
    """Entry point: parse CLI arguments and run one simulation episode.

    Returns the (f0, f1) objective pair from game_loop, or (None, None)
    when the run is cancelled before producing a result.
    """
    argparser = argparse.ArgumentParser(
        description='CARLA Manual Control Client')
    argparser.add_argument(
        '-v', '--verbose',
        action='store_true',
        dest='debug',
        help='print debug information')
    argparser.add_argument(
        '--host',
        metavar='H',
        default='127.0.0.1',
        help='IP of the host server (default: 127.0.0.1)')
    argparser.add_argument(
        '-p', '--port',
        metavar='P',
        default=port,
        type=int,
        help='TCP port to listen to (default: 2000)')
    argparser.add_argument(
        '-a', '--autopilot',
        action='store_true',
        help='enable autopilot')
    argparser.add_argument(
        '--res',
        metavar='WIDTHxHEIGHT',
        default='1280x720',
        help='window resolution (default: 1280x720)')
    argparser.add_argument(
        '--filter',
        metavar='PATTERN',
        default='vehicle.tesla.model3',
        help='actor filter (default: "vehicle.tesla.model3")')
    argparser.add_argument(
        '--rolename',
        metavar='NAME',
        default='hero',
        help='actor role name (default: "hero")')
    args = argparser.parse_args()
    args.width, args.height = [int(x) for x in args.res.split('x')]
    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
    logging.info('listening to server %s:%s', args.host, args.port)
    print(__doc__)
    # fix: defined up-front — the original raised NameError on the return
    # below when interrupted before game_loop assigned f0/f1.
    f0 = f1 = None
    try:
        f0, f1 = game_loop(args, neural_net, scaler, port, phys_settings)
        # fix: removed a stray Python-2-style bare `print` statement (a no-op
        # expression in Python 3).
    except KeyboardInterrupt:
        print('\nCancelled by user. Bye!')
    return f0, f1
|
en
| 0.263888
|
#!/usr/bin/env python Barebone Simulation without camera or extraneous sensors. # ============================================================================== # -- find carla module --------------------------------------------------------- # ============================================================================== # ============================================================================== # -- imports ------------------------------------------------------------------- # ============================================================================== # ============================================================================== # -- Global functions ---------------------------------------------------------- # ============================================================================== # ============================================================================== # -- World --------------------------------------------------------------------- # ============================================================================== # Get a random blueprint. # Set up the sensors. 
#self.player.set_autopilot(True) Default wheel settings: front_left_wheel = carla.WheelPhysicsControl(tire_friction=3.5, damping_rate=0.25, max_steer_angle=70, radius=36.7) front_right_wheel = carla.WheelPhysicsControl(tire_friction=3.5, damping_rate=0.25, max_steer_angle=70, radius=36.7) rear_left_wheel = carla.WheelPhysicsControl(tire_friction=3.5, damping_rate=0.25, max_steer_angle=0.0, radius=36.0) rear_right_wheel = carla.WheelPhysicsControl(tire_friction=3.5, damping_rate=0.25, max_steer_angle=0.0, radius=36.0) # Change Vehicle Physics Control parameters of the vehicle #physics_control.max_rpm = phys_settings['max_rpm'] #physics_control.drag_coefficient = phys_settings['drag_coefficient'] # Apply Vehicle Physics Control for the vehicle #rot = self.player.get_transform().rotation.yaw #self.player.set_velocity(carla.Vector3D(self.speed*math.cos(rot*math.pi/180)/3.6, self.speed*math.sin(rot*math.pi/180)/3.6, 0)) #print("F0 val: " + str(self.f0)) # ============================================================================== # -- KeyboardControl ----------------------------------------------------------- # ============================================================================== #world.player.set_autopilot(self._autopilot_enabled) #print("Distance: " + str((pos.x-25)**2+(pos.y-193.7)**2)) #print("Action: " + str(chosen_action)) # ============================================================================== # -- game_loop() --------------------------------------------------------------- # ============================================================================== class Bunch(object): def __init__(self, adict): self.__dict__.update(adict) args = Bunch({'autopilot':False, 'debug':False, 'filter':'vehicle.tesla.model3', 'height':720, 'host':'127.0.0.1', 'port':2000, 'res':'1280x720', 'rolename':'hero', 'width':1280}) #if (client.get_world().get_map().name != "Town03"): # client.load_world('Town03') # 
============================================================================== # -- main() -------------------------------------------------------------------- # ==============================================================================
| 1.426127
| 1
|
src/fsmpy/base.py
|
MonashUAS/fsmpy
| 0
|
6625615
|
<filename>src/fsmpy/base.py
import logging
logger = ""
'''
Base class for all FSM elements.
Implements name and logging features that can be used on all FSM elements.
'''
class Base(object):
    """Base class for all FSM elements.

    Stores a type tag and a name, and exposes ROS-style logging helpers
    that prefix every message with "<type> (<name>): ".
    """

    def __init__(self, typ, name):
        """Validate and store the element's type and name.

        Raises:
            Exception: if `typ` or `name` is None or empty.
        """
        # fix: identity comparison with None (PEP 8 E711) — `== None` can
        # misfire on objects with a custom __eq__.
        if typ is None or typ == "":
            raise Exception("no type set for class")
        self.__type = typ
        if name is None or name == "":
            raise Exception("no name set for %s" % (self.__type))
        self.__name = name
        # Module-level `logger` holds the logger name ("" -> root logger).
        self.__logger = logging.getLogger(logger)

    def __format(self, msg):
        # Prefix every log line with the element's type and name.
        return "%s (%s): %s" % (self.__type, self.name, msg)

    # Returns the name of the FSM element.
    @property
    def name(self):
        return self.__name

    ### ROS logging methods ###
    def logdebug(self, msg, *args, **kw):
        self.__logger.debug(self.__format(msg), *args, **kw)

    def loginfo(self, msg, *args, **kw):
        self.__logger.info(self.__format(msg), *args, **kw)

    def logwarn(self, msg, *args, **kw):
        self.__logger.warning(self.__format(msg), *args, **kw)

    def logerr(self, msg, *args, **kw):
        self.__logger.error(self.__format(msg), *args, **kw)

    def logfatal(self, msg, *args, **kw):
        # Fatal messages are logged at CRITICAL and then raised so callers
        # cannot silently continue.
        msg_fmt = self.__format(msg)
        self.__logger.critical(msg_fmt, *args, **kw)
        raise Exception(msg_fmt)
    ### / ROS logging methods ###
|
<filename>src/fsmpy/base.py
import logging
logger = ""
'''
Base class for all FSM elements.
Implements name and logging features that can be used on all FSM elements.
'''
class Base(object):
    """Base class for all FSM elements.

    Stores a type tag and a name, and exposes ROS-style logging helpers
    that prefix every message with "<type> (<name>): ".
    """

    def __init__(self, typ, name):
        """Validate and store the element's type and name.

        Raises:
            Exception: if `typ` or `name` is None or empty.
        """
        # fix: identity comparison with None (PEP 8 E711) — `== None` can
        # misfire on objects with a custom __eq__.
        if typ is None or typ == "":
            raise Exception("no type set for class")
        self.__type = typ
        if name is None or name == "":
            raise Exception("no name set for %s" % (self.__type))
        self.__name = name
        # Module-level `logger` holds the logger name ("" -> root logger).
        self.__logger = logging.getLogger(logger)

    def __format(self, msg):
        # Prefix every log line with the element's type and name.
        return "%s (%s): %s" % (self.__type, self.name, msg)

    # Returns the name of the FSM element.
    @property
    def name(self):
        return self.__name

    ### ROS logging methods ###
    def logdebug(self, msg, *args, **kw):
        self.__logger.debug(self.__format(msg), *args, **kw)

    def loginfo(self, msg, *args, **kw):
        self.__logger.info(self.__format(msg), *args, **kw)

    def logwarn(self, msg, *args, **kw):
        self.__logger.warning(self.__format(msg), *args, **kw)

    def logerr(self, msg, *args, **kw):
        self.__logger.error(self.__format(msg), *args, **kw)

    def logfatal(self, msg, *args, **kw):
        # Fatal messages are logged at CRITICAL and then raised so callers
        # cannot silently continue.
        msg_fmt = self.__format(msg)
        self.__logger.critical(msg_fmt, *args, **kw)
        raise Exception(msg_fmt)
    ### / ROS logging methods ###
|
en
| 0.682678
|
Base class for all FSM elements. Implements name and logging features that can be used on all FSM elements. # Returns the name of the FSM element. ### ROS logging methods ### ### / ROS logging methods ###
| 2.454788
| 2
|
app/task/models.py
|
iasmini/task_manager
| 0
|
6625616
|
from django.db import models
class Task(models.Model):
    """A to-do item with a display name and a completion flag."""

    # Display name of the task.
    name = models.CharField(max_length=255)
    # Whether the task has been completed.
    finished = models.BooleanField(default=False)

    class Meta:
        # Default queryset ordering: alphabetical by name.
        ordering = ["name"]

    def __str__(self):
        """Return the task name for display purposes."""
        return self.name
|
from django.db import models
class Task(models.Model):
    """A to-do item with a display name and a completion flag."""

    # Display name of the task.
    name = models.CharField(max_length=255)
    # Whether the task has been completed.
    finished = models.BooleanField(default=False)

    class Meta:
        # Default queryset ordering: alphabetical by name.
        ordering = ["name"]

    def __str__(self):
        """Return the task name for display purposes."""
        return self.name
|
none
| 1
| 2.265201
| 2
|
|
dataset.py
|
biomed-AI/CoSMIG
| 3
|
6625617
|
import numpy as np
import random
from tqdm import tqdm
import os, sys, pdb, math, time
from copy import deepcopy
import multiprocessing as mp
import networkx as nx
import argparse
import scipy.io as sio
import scipy.sparse as ssp
import torch
from torch_geometric.data import Data, Dataset, InMemoryDataset
from sklearn.preprocessing import LabelBinarizer
import warnings
warnings.simplefilter('ignore', ssp.SparseEfficiencyWarning)
cur_dir = os.path.dirname(os.path.realpath(__file__))
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
class SparseRowIndexer:
    """Fast fancy-indexing over the rows of a CSR matrix.

    Pre-splits the matrix's data/indices buffers per row so that selecting
    an arbitrary list of rows reduces to cheap concatenations.
    """

    def __init__(self, csr_matrix):
        row_starts = csr_matrix.indptr[:-1]
        row_ends = csr_matrix.indptr[1:]
        # Per-row slices of the CSR buffers; object dtype keeps them ragged.
        self.data = np.array(
            [csr_matrix.data[s:e] for s, e in zip(row_starts, row_ends)], dtype=object)
        self.indices = np.array(
            [csr_matrix.indices[s:e] for s, e in zip(row_starts, row_ends)], dtype=object)
        # Number of stored values (nnz) in each row.
        self.indptr = np.array(
            [e - s for s, e in zip(row_starts, row_ends)], dtype=object)
        self.shape = csr_matrix.shape

    def __getitem__(self, row_selector):
        """Return a new CSR matrix containing the selected rows, in order."""
        indices = np.concatenate(self.indices[row_selector])
        data = np.concatenate(self.data[row_selector])
        indptr = np.append(0, np.cumsum(self.indptr[row_selector]))
        shape = [indptr.shape[0] - 1, self.shape[1]]
        return ssp.csr_matrix((data, indices, indptr), shape=shape)
class SparseColIndexer:
    """Fast fancy-indexing over the columns of a CSC matrix.

    Mirror image of SparseRowIndexer: pre-splits the CSC buffers per
    column so that selecting a list of columns is a cheap concatenation.
    """

    def __init__(self, csc_matrix):
        col_starts = csc_matrix.indptr[:-1]
        col_ends = csc_matrix.indptr[1:]
        # Per-column slices of the CSC buffers; object dtype keeps them ragged.
        self.data = np.array(
            [csc_matrix.data[s:e] for s, e in zip(col_starts, col_ends)], dtype=object)
        self.indices = np.array(
            [csc_matrix.indices[s:e] for s, e in zip(col_starts, col_ends)], dtype=object)
        # Number of stored values (nnz) in each column.
        self.indptr = np.array(
            [e - s for s, e in zip(col_starts, col_ends)], dtype=object)
        self.shape = csc_matrix.shape

    def __getitem__(self, col_selector):
        """Return a new CSC matrix containing the selected columns, in order."""
        indices = np.concatenate(self.indices[col_selector])
        data = np.concatenate(self.data[col_selector])
        indptr = np.append(0, np.cumsum(self.indptr[col_selector]))
        shape = [self.shape[0], indptr.shape[0] - 1]
        return ssp.csc_matrix((data, indices, indptr), shape=shape)
class MyDataset(InMemoryDataset):
    """In-memory PyG dataset of enclosing subgraphs, one per target link.

    All subgraphs are extracted once in process() and cached on disk under
    the dataset root.
    """
    def __init__(self, root, A, links, labels, h, sample_ratio, max_nodes_per_hop,
                 u_features, v_features, class_values, max_num=None, parallel=True):
        # Fast row/column slicers over the rating matrix A.
        self.Arow = SparseRowIndexer(A)
        self.Acol = SparseColIndexer(A.tocsc())
        # Pair of aligned index arrays (row indices, col indices) of links.
        self.links = links
        # Rating-class index per link.
        self.labels = labels
        # Hop count for the enclosing subgraph.
        self.h = h
        self.sample_ratio = sample_ratio
        self.max_nodes_per_hop = max_nodes_per_hop
        self.u_features = u_features
        self.v_features = v_features
        self.class_values = class_values
        self.parallel = parallel
        # One-hot encoder for edge ratings; binary ratings use a fixed
        # {-1, 0, 1} alphabet so transform() yields 3 columns.
        self.mlb = LabelBinarizer()
        if max(self.class_values) > 1:
            self.mlb.fit(np.array(class_values))
        else:
            self.mlb.fit(np.array([-1.0, 0.0, 1.0]))
        self.max_num = max_num
        # Optional reproducible subsampling of the links (fixed seed).
        if max_num is not None:
            np.random.seed(123)
            num_links = len(links[0])
            perm = np.random.permutation(num_links)
            perm = perm[:max_num]
            self.links = (links[0][perm], links[1][perm])
            self.labels = labels[perm]
        super(MyDataset, self).__init__(root)
        self.data, self.slices = torch.load(self.processed_paths[0])
    @property
    def processed_file_names(self):
        # Cache file name depends on the subsample size so different
        # max_num settings do not collide on disk.
        name = 'data.pt'
        if self.max_num is not None:
            name = 'data_{}.pt'.format(self.max_num)
        return [name]
    def edge_features(self):
        # Number of one-hot edge-feature columns (3 in the binary case).
        if len(set(self.class_values)) == 2:
            return 3
        return len(set(self.class_values))
    def process(self):
        # Extract enclosing subgraphs and save to disk
        data_list = links2subgraphs(self.Arow, self.Acol, self.links, self.labels, self.h,
                                    self.sample_ratio, self.max_nodes_per_hop,
                                    self.u_features, self.v_features,
                                    self.class_values, self.parallel, self.mlb)
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
        # Free the per-graph list; only the collated tensors are kept.
        del data_list
class MyDynamicDataset(Dataset):
    """PyG dataset that extracts each link's enclosing subgraph on demand.

    Unlike MyDataset, nothing is precomputed or cached: get(idx) runs the
    subgraph extraction for that link every time it is called.
    """
    def __init__(self, root, A, links, labels, h, sample_ratio, max_nodes_per_hop,
                 u_features, v_features, class_values, max_num=None):
        super(MyDynamicDataset, self).__init__(root)
        # Fast row/column slicers over the rating matrix A.
        self.Arow = SparseRowIndexer(A)
        self.Acol = SparseColIndexer(A.tocsc())
        # Pair of aligned index arrays (row indices, col indices) of links.
        self.links = links
        # Rating-class index per link.
        self.labels = labels
        # Hop count for the enclosing subgraph.
        self.h = h
        self.sample_ratio = sample_ratio
        self.max_nodes_per_hop = max_nodes_per_hop
        self.u_features = u_features
        self.v_features = v_features
        self.class_values = class_values
        # One-hot encoder for edge ratings; binary ratings use a fixed
        # {-1, 0, 1} alphabet so transform() yields 3 columns.
        self.mlb = LabelBinarizer()
        if max(self.class_values) > 1:
            self.mlb.fit(np.array(class_values))
        else:
            self.mlb.fit(np.array([-1.0, 0.0, 1.0]))
        # Optional reproducible subsampling of the links (fixed seed).
        if max_num is not None:
            np.random.seed(123)
            num_links = len(links[0])
            perm = np.random.permutation(num_links)
            perm = perm[:max_num]
            self.links = (links[0][perm], links[1][perm])
            self.labels = labels[perm]
    def __len__(self):
        return len(self.links[0])
    def edge_features(self):
        # Number of one-hot edge-feature columns (3 in the binary case).
        if len(set(self.class_values)) == 2:
            return 3
        return len(set(self.class_values))
    def get(self, idx):
        # Extract and label the h-hop enclosing subgraph around link idx,
        # then convert it to a torch_geometric Data object.
        i, j = self.links[0][idx], self.links[1][idx]
        g_label = self.labels[idx]
        tmp = subgraph_extraction_labeling(
            (i, j), self.Arow, self.Acol, self.h, self.sample_ratio, self.max_nodes_per_hop,
            self.u_features, self.v_features, self.class_values, g_label, self.mlb
        )
        return construct_pyg_graph(*tmp)
def links2subgraphs(Arow,
                    Acol,
                    links,
                    labels,
                    h=1,
                    sample_ratio=1.0,
                    max_nodes_per_hop=None,
                    u_features=None,
                    v_features=None,
                    class_values=None,
                    parallel=True,
                    mlb=None):
    """Extract the h-hop enclosing subgraph of every link and return a list
    of torch_geometric graphs.

    With parallel=False the links are processed sequentially; otherwise the
    extraction is fanned out over a multiprocessing pool and the graph
    objects are built afterwards in the parent process.
    """
    # extract enclosing subgraphs
    print('Enclosing subgraph extraction begins...')
    g_list = []
    if not parallel:
        with tqdm(total=len(links[0])) as pbar:
            for i, j, g_label in zip(links[0], links[1], labels):
                tmp = subgraph_extraction_labeling(
                    (i, j), Arow, Acol, h, sample_ratio, max_nodes_per_hop, u_features,
                    v_features, class_values, g_label, mlb
                )
                data = construct_pyg_graph(*tmp)
                g_list.append(data)
                pbar.update(1)
    else:
        start = time.time()
        pool = mp.Pool(mp.cpu_count())
        # Fan the extractions out asynchronously; construct_pyg_graph is NOT
        # run in the workers — torch tensors are built in the parent below.
        results = pool.starmap_async(
            subgraph_extraction_labeling,
            [
                ((i, j), Arow, Acol, h, sample_ratio, max_nodes_per_hop, u_features,
                 v_features, class_values, g_label, mlb)
                for i, j, g_label in zip(links[0], links[1], labels)
            ]
        )
        # Progress loop. NOTE(review): _number_left is a private
        # multiprocessing API and may change between Python versions.
        remaining = results._number_left
        pbar = tqdm(total=remaining)
        while True:
            pbar.update(remaining - results._number_left)
            if results.ready(): break
            remaining = results._number_left
            time.sleep(1)
        results = results.get()
        pool.close()
        pbar.close()
        end = time.time()
        print("Time elapsed for subgraph extraction: {}s".format(end-start))
        print("Transforming to pytorch_geometric graphs...")
        g_list = []
        pbar = tqdm(total=len(results))
        # pop() releases each extracted tuple as soon as it is converted,
        # keeping peak memory down.
        while results:
            tmp = results.pop()
            g_list.append(construct_pyg_graph(*tmp))
            pbar.update(1)
        pbar.close()
        end2 = time.time()
        print("Time elapsed for transforming to pytorch_geometric graphs: {}s".format(end2-end))
    return g_list
def subgraph_extraction_labeling(ind, Arow, Acol, h=1, sample_ratio=1.0, max_nodes_per_hop=None,
                                 u_features=None, v_features=None, class_values=None,
                                 y=1, mlb=None):
    """Extract the h-hop enclosing subgraph around the target link ``ind``.

    Alternating BFS over the bipartite rating graph: the user-side frontier
    expands through ``Arow`` (rows), the item-side frontier through ``Acol``
    (columns).  Node labels encode (hop distance, side) as ``2*dist`` for
    users and ``2*dist + 1`` for items.

    Parameters:
        ind: (user_index, item_index) pair of the target link.
        Arow / Acol: row- and column-indexable views of the rating matrix.
        h: number of hops to expand.
        sample_ratio: if < 1.0, keep only this fraction of each new fringe.
        max_nodes_per_hop: optional hard cap on fringe size per hop.
        u_features / v_features: optional per-node feature matrices.
        class_values: maps the label index ``y`` to its rating value.
        y: label index of the target link.
        mlb: fitted LabelBinarizer used to one-hot the edge ratings.

    Returns:
        ``(u, v, r, node_labels, max_node_label, y, node_features, attr)``,
        the argument tuple consumed by ``construct_pyg_graph``.
    """
    u_nodes, v_nodes = [ind[0]], [ind[1]]
    u_dist, v_dist = [0], [0]
    u_visited, v_visited = set([ind[0]]), set([ind[1]])
    u_fringe, v_fringe = set([ind[0]]), set([ind[1]])
    for dist in range(1, h+1):
        # Items adjacent to the current user fringe, and vice versa.
        v_fringe, u_fringe = neighbors(u_fringe, Arow), neighbors(v_fringe, Acol)
        u_fringe = u_fringe - u_visited
        v_fringe = v_fringe - v_visited
        u_visited = u_visited.union(u_fringe)
        v_visited = v_visited.union(v_fringe)
        if sample_ratio < 1.0:
            # BUGFIX: random.sample() no longer accepts sets (TypeError on
            # Python >= 3.11) — materialize the fringes as lists first.
            u_fringe = random.sample(list(u_fringe), int(sample_ratio*len(u_fringe)))
            v_fringe = random.sample(list(v_fringe), int(sample_ratio*len(v_fringe)))
        if max_nodes_per_hop is not None:
            if max_nodes_per_hop < len(u_fringe):
                u_fringe = random.sample(list(u_fringe), max_nodes_per_hop)
            if max_nodes_per_hop < len(v_fringe):
                v_fringe = random.sample(list(v_fringe), max_nodes_per_hop)
        if len(u_fringe) == 0 and len(v_fringe) == 0:
            break
        u_nodes = u_nodes + list(u_fringe)
        v_nodes = v_nodes + list(v_fringe)
        u_dist = u_dist + [dist] * len(u_fringe)
        v_dist = v_dist + [dist] * len(v_fringe)
    subgraph = Arow[u_nodes][:, v_nodes]
    # Remove the target link itself so the model cannot trivially read it.
    subgraph[0, 0] = 0
    # Sparse triplets; stored ratings are shifted by +1 so 0 can mean "absent".
    u, v, r = ssp.find(subgraph)
    v += len(u_nodes)  # item nodes are indexed after all user nodes
    r = r - 1  # shift back to the original rating labels
    if max(r) == 1:
        # Binary-rating case: mlb was fitted on {-1, 0, 1}, so map 0 -> -1.
        newr = [float(i) if i == 1 else -1 for i in r]
        attr = mlb.transform(newr).astype(dtype=np.int8)
    else:
        attr = mlb.transform(r).astype(dtype=np.int8)
    # Node label encodes (distance, side): 2*dist for users, 2*dist+1 for items.
    node_labels = [x*2 for x in u_dist] + [x*2+1 for x in v_dist]
    max_node_label = 2*h + 1
    y = class_values[y]
    # get node features
    if u_features is not None:
        u_features = u_features[u_nodes]
    if v_features is not None:
        v_features = v_features[v_nodes]
    # Only the target user's and target item's features are emitted.  The dead
    # `if False:` variants (padded features, one-hot node ids) were removed.
    node_features = None
    if u_features is not None and v_features is not None:
        node_features = [u_features[0], v_features[0]]
    return u, v, r, node_labels, max_node_label, y, node_features, attr
def construct_pyg_graph(u, v, r, node_labels, max_node_label, y, node_features, attr):
    """Build a torch_geometric ``Data`` object from subgraph triplets.

    Edges are made undirected by stacking both (u, v) and (v, u); the rating
    labels and the one-hot edge attributes are duplicated accordingly.  Base
    node features are the one-hot encoding of the distance-based node labels.
    """
    u, v = torch.LongTensor(u), torch.LongTensor(v)
    r = torch.LongTensor(r)
    edge_index = torch.stack([torch.cat([u, v]), torch.cat([v, u])], 0)
    edge_type = torch.cat([r, r])
    attr = torch.FloatTensor(attr)
    edge_attr = torch.cat([attr, attr], dim=0)
    x = torch.FloatTensor(one_hot(node_labels, max_node_label+1))
    y = torch.FloatTensor([y])
    data = Data(x, edge_index, edge_type=edge_type, edge_attr=edge_attr, y=y)
    if node_features is not None:
        if type(node_features) == list:  # a list of u_feature and v_feature
            # Target-only features: attach as separate tensors with a leading
            # batch dimension so the DataLoader can collate them.
            u_feature, v_feature = node_features
            data.u_feature = torch.FloatTensor(u_feature).unsqueeze(0)
            data.v_feature = torch.FloatTensor(v_feature).unsqueeze(0)
        else:
            # Full per-node feature matrix: concatenate onto the label one-hots.
            x2 = torch.FloatTensor(node_features)
            data.x = torch.cat([data.x, x2], 1)
    return data
def onehot_encoding(x, allowable_set):
    """Return a boolean indicator list: True exactly where ``x`` matches."""
    return list(map(lambda candidate: candidate == x, allowable_set))
def neighbors(fringe, A):
    """Return the union of all 1-hop neighbours of the nodes in ``fringe``.

    ``A`` must support fancy row indexing and expose a CSR/CSC-style
    ``indices`` array on the result (scipy sparse or the Sparse*Indexer
    helpers in this module).
    """
    if not fringe:
        return set()
    selected_rows = A[list(fringe)]
    return set(selected_rows.indices)
def one_hot(idx, length):
idx = np.array(idx)
x = np.zeros([len(idx), length])
x[np.arange(len(idx)), idx] = 1.0
return x
def PyGGraph_to_nx(data):
    """Convert a torch_geometric ``Data`` graph back into a networkx graph.

    Edge ratings become the 'type' edge attribute, the argmax of each one-hot
    node-label row becomes the 'type' node attribute, and the target rating is
    stored in ``g.graph['rating']``.
    """
    edges = list(zip(data.edge_index[0, :].tolist(), data.edge_index[1, :].tolist()))
    g = nx.from_edgelist(edges)
    g.add_nodes_from(range(len(data.x)))  # in case some nodes are isolated
    # transform r back to rating label
    edge_types = {(u, v): data.edge_type[i].item() for i, (u, v) in enumerate(edges)}
    nx.set_edge_attributes(g, name='type', values=edge_types)
    node_types = dict(zip(range(data.num_nodes), torch.argmax(data.x, 1).tolist()))
    nx.set_node_attributes(g, name='type', values=node_types)
    g.graph['rating'] = data.y.item()
    return g
|
import numpy as np
import random
from tqdm import tqdm
import os, sys, pdb, math, time
from copy import deepcopy
import multiprocessing as mp
import networkx as nx
import argparse
import scipy.io as sio
import scipy.sparse as ssp
import torch
from torch_geometric.data import Data, Dataset, InMemoryDataset
from sklearn.preprocessing import LabelBinarizer
import warnings
warnings.simplefilter('ignore', ssp.SparseEfficiencyWarning)
cur_dir = os.path.dirname(os.path.realpath(__file__))
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
class SparseRowIndexer:
    """Pre-slices a CSR matrix so arbitrary row subsets can be gathered fast.

    The flat CSR arrays are split into one (data, indices) pair per row at
    construction time; ``__getitem__`` then just concatenates the selected
    rows back into a new CSR matrix.
    """

    def __init__(self, csr_matrix):
        per_row_data = []
        per_row_indices = []
        per_row_nnz = []
        # Consecutive indptr entries delimit each row's slice of data/indices.
        for row_start, row_end in zip(csr_matrix.indptr[:-1], csr_matrix.indptr[1:]):
            per_row_data.append(csr_matrix.data[row_start:row_end])
            per_row_indices.append(csr_matrix.indices[row_start:row_end])
            per_row_nnz.append(row_end - row_start)  # nnz of the row
        self.data = np.array(per_row_data, dtype=object)
        self.indices = np.array(per_row_indices, dtype=object)
        self.indptr = np.array(per_row_nnz, dtype=object)
        self.shape = csr_matrix.shape

    def __getitem__(self, row_selector):
        """Return the selected rows as a new CSR matrix."""
        gathered_indices = np.concatenate(self.indices[row_selector])
        gathered_data = np.concatenate(self.data[row_selector])
        gathered_indptr = np.append(0, np.cumsum(self.indptr[row_selector]))
        new_shape = [gathered_indptr.shape[0] - 1, self.shape[1]]
        return ssp.csr_matrix((gathered_data, gathered_indices, gathered_indptr),
                              shape=new_shape)
class SparseColIndexer:
    """Column-wise twin of ``SparseRowIndexer``, operating on a CSC matrix."""

    def __init__(self, csc_matrix):
        per_col_data = []
        per_col_indices = []
        per_col_nnz = []
        # Consecutive indptr entries delimit each column's slice.
        for col_start, col_end in zip(csc_matrix.indptr[:-1], csc_matrix.indptr[1:]):
            per_col_data.append(csc_matrix.data[col_start:col_end])
            per_col_indices.append(csc_matrix.indices[col_start:col_end])
            per_col_nnz.append(col_end - col_start)  # nnz of the column
        self.data = np.array(per_col_data, dtype=object)
        self.indices = np.array(per_col_indices, dtype=object)
        self.indptr = np.array(per_col_nnz, dtype=object)
        self.shape = csc_matrix.shape

    def __getitem__(self, col_selector):
        """Return the selected columns as a new CSC matrix."""
        gathered_indices = np.concatenate(self.indices[col_selector])
        gathered_data = np.concatenate(self.data[col_selector])
        gathered_indptr = np.append(0, np.cumsum(self.indptr[col_selector]))
        new_shape = [self.shape[0], gathered_indptr.shape[0] - 1]
        return ssp.csc_matrix((gathered_data, gathered_indices, gathered_indptr),
                              shape=new_shape)
class MyDataset(InMemoryDataset):
    """Static dataset of enclosing subgraphs, extracted once and cached.

    All subgraphs are extracted in ``process`` and saved under ``root``;
    subsequent runs just load the cached tensors.
    """
    def __init__(self, root, A, links, labels, h, sample_ratio, max_nodes_per_hop,
                 u_features, v_features, class_values, max_num=None, parallel=True):
        # Row/column views of the rating matrix for fast fancy indexing.
        self.Arow = SparseRowIndexer(A)
        self.Acol = SparseColIndexer(A.tocsc())
        self.links = links
        self.labels = labels
        self.h = h
        self.sample_ratio = sample_ratio
        self.max_nodes_per_hop = max_nodes_per_hop
        self.u_features = u_features
        self.v_features = v_features
        self.class_values = class_values
        self.parallel = parallel
        self.mlb = LabelBinarizer()
        if max(self.class_values) > 1:
            self.mlb.fit(np.array(class_values))
        else:
            # Binary-rating datasets: fit on {-1, 0, 1} so the binarizer
            # produces 3 output columns (see edge_features).
            self.mlb.fit(np.array([-1.0, 0.0, 1.0]))
        self.max_num = max_num
        if max_num is not None:
            # Deterministically subsample max_num links (fixed seed).
            np.random.seed(123)
            num_links = len(links[0])
            perm = np.random.permutation(num_links)
            perm = perm[:max_num]
            self.links = (links[0][perm], links[1][perm])
            self.labels = labels[perm]
        super(MyDataset, self).__init__(root)
        self.data, self.slices = torch.load(self.processed_paths[0])
    @property
    def processed_file_names(self):
        # Cache file name depends on the subsample size so different max_num
        # settings do not collide on disk.
        name = 'data.pt'
        if self.max_num is not None:
            name = 'data_{}.pt'.format(self.max_num)
        return [name]
    def edge_features(self):
        # Width of the one-hot edge attributes produced by the binarizer.
        if len(set(self.class_values)) == 2:
            return 3
        return len(set(self.class_values))
    def process(self):
        # Extract enclosing subgraphs and save to disk
        data_list = links2subgraphs(self.Arow, self.Acol, self.links, self.labels, self.h,
                                    self.sample_ratio, self.max_nodes_per_hop,
                                    self.u_features, self.v_features,
                                    self.class_values, self.parallel, self.mlb)
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
        del data_list
class MyDynamicDataset(Dataset):
    """On-the-fly dataset: each enclosing subgraph is extracted in ``get``.

    Unlike ``MyDataset``, nothing is cached to disk; this trades repeated
    extraction cost for a much smaller memory/disk footprint.
    """
    def __init__(self, root, A, links, labels, h, sample_ratio, max_nodes_per_hop,
                 u_features, v_features, class_values, max_num=None):
        super(MyDynamicDataset, self).__init__(root)
        # Row/column views of the rating matrix for fast fancy indexing.
        self.Arow = SparseRowIndexer(A)
        self.Acol = SparseColIndexer(A.tocsc())
        self.links = links
        self.labels = labels
        self.h = h
        self.sample_ratio = sample_ratio
        self.max_nodes_per_hop = max_nodes_per_hop
        self.u_features = u_features
        self.v_features = v_features
        self.class_values = class_values
        self.mlb = LabelBinarizer()
        if max(self.class_values) > 1:
            self.mlb.fit(np.array(class_values))
        else:
            # Binary-rating datasets: fit on {-1, 0, 1} (3 output columns).
            self.mlb.fit(np.array([-1.0, 0.0, 1.0]))
        if max_num is not None:
            # Deterministically subsample max_num links (fixed seed).
            np.random.seed(123)
            num_links = len(links[0])
            perm = np.random.permutation(num_links)
            perm = perm[:max_num]
            self.links = (links[0][perm], links[1][perm])
            self.labels = labels[perm]
    def __len__(self):
        # One sample per (user, item) link pair.
        return len(self.links[0])
    def edge_features(self):
        # Width of the one-hot edge attributes produced by the binarizer.
        if len(set(self.class_values)) == 2:
            return 3
        return len(set(self.class_values))
    def get(self, idx):
        # Extract and convert the enclosing subgraph for link `idx`.
        i, j = self.links[0][idx], self.links[1][idx]
        g_label = self.labels[idx]
        tmp = subgraph_extraction_labeling(
            (i, j), self.Arow, self.Acol, self.h, self.sample_ratio, self.max_nodes_per_hop,
            self.u_features, self.v_features, self.class_values, g_label, self.mlb
        )
        return construct_pyg_graph(*tmp)
def links2subgraphs(Arow,
                    Acol,
                    links,
                    labels,
                    h=1,
                    sample_ratio=1.0,
                    max_nodes_per_hop=None,
                    u_features=None,
                    v_features=None,
                    class_values=None,
                    parallel=True,
                    mlb=None):
    """Extract the enclosing subgraph of every link and build PyG graphs.

    Runs either sequentially (with a tqdm bar) or fans the extraction out over
    a multiprocessing pool, then converts every extracted subgraph with
    ``construct_pyg_graph``.  Returns a list of torch_geometric Data objects.
    """
    # extract enclosing subgraphs
    print('Enclosing subgraph extraction begins...')
    g_list = []
    if not parallel:
        with tqdm(total=len(links[0])) as pbar:
            for i, j, g_label in zip(links[0], links[1], labels):
                tmp = subgraph_extraction_labeling(
                    (i, j), Arow, Acol, h, sample_ratio, max_nodes_per_hop, u_features,
                    v_features, class_values, g_label, mlb
                )
                data = construct_pyg_graph(*tmp)
                g_list.append(data)
                pbar.update(1)
    else:
        start = time.time()
        pool = mp.Pool(mp.cpu_count())
        results = pool.starmap_async(
            subgraph_extraction_labeling,
            [
                ((i, j), Arow, Acol, h, sample_ratio, max_nodes_per_hop, u_features,
                 v_features, class_values, g_label, mlb)
                for i, j, g_label in zip(links[0], links[1], labels)
            ]
        )
        # NOTE(review): _number_left is a private AsyncResult attribute, used
        # here only to drive the progress bar; may break on Python upgrades.
        remaining = results._number_left
        pbar = tqdm(total=remaining)
        while True:
            # Poll once a second until all worker tasks have finished.
            pbar.update(remaining - results._number_left)
            if results.ready(): break
            remaining = results._number_left
            time.sleep(1)
        results = results.get()
        pool.close()
        pbar.close()
        end = time.time()
        print("Time elapsed for subgraph extraction: {}s".format(end-start))
        print("Transforming to pytorch_geometric graphs...")
        g_list = []
        pbar = tqdm(total=len(results))
        # pop() so each raw result is released as soon as it is converted.
        while results:
            tmp = results.pop()
            g_list.append(construct_pyg_graph(*tmp))
            pbar.update(1)
        pbar.close()
        end2 = time.time()
        print("Time elapsed for transforming to pytorch_geometric graphs: {}s".format(end2-end))
    return g_list
def subgraph_extraction_labeling(ind, Arow, Acol, h=1, sample_ratio=1.0, max_nodes_per_hop=None,
                                 u_features=None, v_features=None, class_values=None,
                                 y=1, mlb=None):
    """Extract the h-hop enclosing subgraph around the target link ``ind``.

    Alternating BFS over the bipartite rating graph: the user-side frontier
    expands through ``Arow`` (rows), the item-side frontier through ``Acol``
    (columns).  Node labels encode (hop distance, side) as ``2*dist`` for
    users and ``2*dist + 1`` for items.

    Parameters:
        ind: (user_index, item_index) pair of the target link.
        Arow / Acol: row- and column-indexable views of the rating matrix.
        h: number of hops to expand.
        sample_ratio: if < 1.0, keep only this fraction of each new fringe.
        max_nodes_per_hop: optional hard cap on fringe size per hop.
        u_features / v_features: optional per-node feature matrices.
        class_values: maps the label index ``y`` to its rating value.
        y: label index of the target link.
        mlb: fitted LabelBinarizer used to one-hot the edge ratings.

    Returns:
        ``(u, v, r, node_labels, max_node_label, y, node_features, attr)``,
        the argument tuple consumed by ``construct_pyg_graph``.
    """
    u_nodes, v_nodes = [ind[0]], [ind[1]]
    u_dist, v_dist = [0], [0]
    u_visited, v_visited = set([ind[0]]), set([ind[1]])
    u_fringe, v_fringe = set([ind[0]]), set([ind[1]])
    for dist in range(1, h+1):
        # Items adjacent to the current user fringe, and vice versa.
        v_fringe, u_fringe = neighbors(u_fringe, Arow), neighbors(v_fringe, Acol)
        u_fringe = u_fringe - u_visited
        v_fringe = v_fringe - v_visited
        u_visited = u_visited.union(u_fringe)
        v_visited = v_visited.union(v_fringe)
        if sample_ratio < 1.0:
            # BUGFIX: random.sample() no longer accepts sets (TypeError on
            # Python >= 3.11) — materialize the fringes as lists first.
            u_fringe = random.sample(list(u_fringe), int(sample_ratio*len(u_fringe)))
            v_fringe = random.sample(list(v_fringe), int(sample_ratio*len(v_fringe)))
        if max_nodes_per_hop is not None:
            if max_nodes_per_hop < len(u_fringe):
                u_fringe = random.sample(list(u_fringe), max_nodes_per_hop)
            if max_nodes_per_hop < len(v_fringe):
                v_fringe = random.sample(list(v_fringe), max_nodes_per_hop)
        if len(u_fringe) == 0 and len(v_fringe) == 0:
            break
        u_nodes = u_nodes + list(u_fringe)
        v_nodes = v_nodes + list(v_fringe)
        u_dist = u_dist + [dist] * len(u_fringe)
        v_dist = v_dist + [dist] * len(v_fringe)
    subgraph = Arow[u_nodes][:, v_nodes]
    # Remove the target link itself so the model cannot trivially read it.
    subgraph[0, 0] = 0
    # Sparse triplets; stored ratings are shifted by +1 so 0 can mean "absent".
    u, v, r = ssp.find(subgraph)
    v += len(u_nodes)  # item nodes are indexed after all user nodes
    r = r - 1  # shift back to the original rating labels
    if max(r) == 1:
        # Binary-rating case: mlb was fitted on {-1, 0, 1}, so map 0 -> -1.
        newr = [float(i) if i == 1 else -1 for i in r]
        attr = mlb.transform(newr).astype(dtype=np.int8)
    else:
        attr = mlb.transform(r).astype(dtype=np.int8)
    # Node label encodes (distance, side): 2*dist for users, 2*dist+1 for items.
    node_labels = [x*2 for x in u_dist] + [x*2+1 for x in v_dist]
    max_node_label = 2*h + 1
    y = class_values[y]
    # get node features
    if u_features is not None:
        u_features = u_features[u_nodes]
    if v_features is not None:
        v_features = v_features[v_nodes]
    # Only the target user's and target item's features are emitted.  The dead
    # `if False:` variants (padded features, one-hot node ids) were removed.
    node_features = None
    if u_features is not None and v_features is not None:
        node_features = [u_features[0], v_features[0]]
    return u, v, r, node_labels, max_node_label, y, node_features, attr
def construct_pyg_graph(u, v, r, node_labels, max_node_label, y, node_features, attr):
    """Build a torch_geometric ``Data`` object from subgraph triplets.

    Edges are made undirected by stacking both (u, v) and (v, u); the rating
    labels and the one-hot edge attributes are duplicated accordingly.  Base
    node features are the one-hot encoding of the distance-based node labels.
    """
    u, v = torch.LongTensor(u), torch.LongTensor(v)
    r = torch.LongTensor(r)
    edge_index = torch.stack([torch.cat([u, v]), torch.cat([v, u])], 0)
    edge_type = torch.cat([r, r])
    attr = torch.FloatTensor(attr)
    edge_attr = torch.cat([attr, attr], dim=0)
    x = torch.FloatTensor(one_hot(node_labels, max_node_label+1))
    y = torch.FloatTensor([y])
    data = Data(x, edge_index, edge_type=edge_type, edge_attr=edge_attr, y=y)
    if node_features is not None:
        if type(node_features) == list:  # a list of u_feature and v_feature
            # Target-only features: attach as separate tensors with a leading
            # batch dimension so the DataLoader can collate them.
            u_feature, v_feature = node_features
            data.u_feature = torch.FloatTensor(u_feature).unsqueeze(0)
            data.v_feature = torch.FloatTensor(v_feature).unsqueeze(0)
        else:
            # Full per-node feature matrix: concatenate onto the label one-hots.
            x2 = torch.FloatTensor(node_features)
            data.x = torch.cat([data.x, x2], 1)
    return data
def onehot_encoding(x, allowable_set):
    """Return a boolean indicator list: True exactly where ``x`` matches."""
    return list(map(lambda candidate: candidate == x, allowable_set))
def neighbors(fringe, A):
    """Return the union of all 1-hop neighbours of the nodes in ``fringe``.

    ``A`` must support fancy row indexing and expose a CSR/CSC-style
    ``indices`` array on the result (scipy sparse or the Sparse*Indexer
    helpers in this module).
    """
    if not fringe:
        return set()
    selected_rows = A[list(fringe)]
    return set(selected_rows.indices)
def one_hot(idx, length):
idx = np.array(idx)
x = np.zeros([len(idx), length])
x[np.arange(len(idx)), idx] = 1.0
return x
def PyGGraph_to_nx(data):
    """Convert a torch_geometric ``Data`` graph back into a networkx graph.

    Edge ratings become the 'type' edge attribute, the argmax of each one-hot
    node-label row becomes the 'type' node attribute, and the target rating is
    stored in ``g.graph['rating']``.
    """
    edges = list(zip(data.edge_index[0, :].tolist(), data.edge_index[1, :].tolist()))
    g = nx.from_edgelist(edges)
    g.add_nodes_from(range(len(data.x)))  # in case some nodes are isolated
    # transform r back to rating label
    edge_types = {(u, v): data.edge_type[i].item() for i, (u, v) in enumerate(edges)}
    nx.set_edge_attributes(g, name='type', values=edge_types)
    node_types = dict(zip(range(data.num_nodes), torch.argmax(data.x, 1).tolist()))
    nx.set_node_attributes(g, name='type', values=node_types)
    g.graph['rating'] = data.y.item()
    return g
|
en
| 0.756735
|
# nnz of the row # Extract enclosing subgraphs and save to disk # extract enclosing subgraphs # extract the h-hop enclosing subgraph around link 'ind' # remove link between target nodes # prepare pyg graph constructor input # r is 1, 2... (rating labels + 1) # transform r back to rating label # print(onehot_encoding(list(mlb.classes_),r)) # get node features # directly use padded node features # use identity features (one-hot encodings of node idxes) #node_features = np.concatenate([node_features, node_ids], 1) # only output node features for the target user and item # a list of u_feature and v_feature # find all 1-hop neighbors of nodes in fringe from A # in case some nodes are isolated # transform r back to rating label
| 2.156184
| 2
|
frictionless/row.py
|
loganbyers/frictionless-py
| 0
|
6625618
|
<gh_stars>0
# TODO: review this dependency
from .plugins.json import JsonParser
from itertools import zip_longest
from collections import OrderedDict
from decimal import Decimal
from .helpers import cached_property
from . import helpers
from . import errors
# TODO: rebase on list base class for permormance?
# TODO: if not list - drop OrderedDict? From Python3.7 order is guaranteed
class Row(OrderedDict):
"""Row representation
API | Usage
-------- | --------
Public | `from frictionless import Table`
This object is returned by `extract`, `table.read_rows`, and other functions.
```python
rows = extract("data/table.csv")
for row in rows:
# work with the Row
```
Parameters:
cells (any[]): array of cells
schema (Schema): table schema
field_positions (int[]): table field positions
row_position (int): row position from 1
row_number (int): row number from 1
"""
def __init__(self, cells, *, schema, field_positions, row_position, row_number):
assert len(field_positions) in (len(cells), len(schema.fields))
# Set attributes
fields = schema.fields
self.__schema = schema
self.__field_positions = field_positions
self.__row_position = row_position
self.__row_number = row_number
self.__blank_cells = {}
self.__error_cells = {}
self.__errors = []
# Extra cells
if len(fields) < len(cells):
iterator = cells[len(fields) :]
start = max(field_positions[: len(fields)]) + 1
del cells[len(fields) :]
for field_position, cell in enumerate(iterator, start=start):
self.__errors.append(
errors.ExtraCellError(
note="",
cells=list(map(str, cells)),
row_number=row_number,
row_position=row_position,
cell=str(cell),
field_name="",
field_number=len(fields) + field_position - start,
field_position=field_position,
)
)
# Missing cells
if len(fields) > len(cells):
start = len(cells) + 1
iterator = zip_longest(field_positions[len(cells) :], fields[len(cells) :])
for field_number, (field_position, field) in enumerate(iterator, start=start):
if field is not None:
cells.append(None)
self.__errors.append(
errors.MissingCellError(
note="",
cells=list(map(str, cells)),
row_number=row_number,
row_position=row_position,
cell="",
field_name=field.name,
field_number=field_number,
field_position=field_position
or max(field_positions) + field_number - start + 1,
)
)
# Iterate items
field_number = 0
for field_position, field, source in zip(field_positions, fields, cells):
field_number += 1
# Read cell
target, notes = field.read_cell(source)
type_note = notes.pop("type", None) if notes else None
if target is None and not type_note:
self.__blank_cells[field.name] = source
self[field.name] = target
# Type error
if type_note:
self.__error_cells[field.name] = source
self.__errors.append(
errors.TypeError(
note=type_note,
cells=list(map(str, cells)),
row_number=row_number,
row_position=row_position,
cell=str(source),
field_name=field.name,
field_number=field_number,
field_position=field_position,
)
)
# Constraint errors
if notes:
for note in notes.values():
self.__errors.append(
errors.ConstraintError(
note=note,
cells=list(map(str, cells)),
row_number=row_number,
row_position=row_position,
cell=str(source),
field_name=field.name,
field_number=field_number,
field_position=field_position,
)
)
# Blank row
if len(self) == len(self.__blank_cells):
self.__errors = [
errors.BlankRowError(
note="",
cells=list(map(str, cells)),
row_number=row_number,
row_position=row_position,
)
]
@cached_property
def schema(self):
"""
Returns:
Schema: table schema
"""
return self.__schema
@cached_property
def field_positions(self):
"""
Returns:
int[]: table field positions
"""
return self.__field_positions
@cached_property
def row_position(self):
"""
Returns:
int: row position from 1
"""
return self.__row_position
@cached_property
def row_number(self):
"""
Returns:
int: row number from 1
"""
return self.__row_number
@cached_property
def blank_cells(self):
"""A mapping indexed by a field name with blank cells before parsing
Returns:
dict: row blank cells
"""
return self.__blank_cells
@cached_property
def error_cells(self):
"""A mapping indexed by a field name with error cells before parsing
Returns:
dict: row error cells
"""
return self.__error_cells
@cached_property
def errors(self):
"""
Returns:
Error[]: row errors
"""
return self.__errors
@cached_property
def valid(self):
"""
Returns:
bool: if row valid
"""
return not self.__errors
# Import/Export
def to_str(self):
"""
Returns:
str: a row as a CSV string
"""
cells = []
for field in self.__schema.fields:
if field.name in self:
cell, notes = field.write_cell(self[field.name])
cells.append(cell)
return helpers.stringify_csv_string(cells)
def to_dict(self, *, json=False):
"""
Parameters:
json (bool): make data types compatible with JSON format
Returns:
dict: a row as a dictionary
"""
if json:
result = {}
for field in self.__schema.fields:
if field.name in self:
cell = self[field.name]
if field.type not in JsonParser.native_types:
cell, notes = field.write_cell(cell, ignore_missing=True)
if isinstance(cell, Decimal):
cell = float(cell)
result[field.name] = cell
return result
return dict(self)
def to_list(self, *, json=False):
"""
Parameters:
json (bool): make data types compatible with JSON format
Returns:
dict: a row as a list
"""
if json:
result = []
for field in self.__schema.fields:
if field.name in self:
cell = self[field.name]
if field.type not in JsonParser.native_types:
cell, notes = field.write_cell(cell, ignore_missing=True)
if isinstance(cell, Decimal):
cell = float(cell)
result.append(cell)
return result
return list(self.values())
|
# TODO: review this dependency
from .plugins.json import JsonParser
from itertools import zip_longest
from collections import OrderedDict
from decimal import Decimal
from .helpers import cached_property
from . import helpers
from . import errors
# TODO: rebase on list base class for permormance?
# TODO: if not list - drop OrderedDict? From Python3.7 order is guaranteed
class Row(OrderedDict):
"""Row representation
API | Usage
-------- | --------
Public | `from frictionless import Table`
This object is returned by `extract`, `table.read_rows`, and other functions.
```python
rows = extract("data/table.csv")
for row in rows:
# work with the Row
```
Parameters:
cells (any[]): array of cells
schema (Schema): table schema
field_positions (int[]): table field positions
row_position (int): row position from 1
row_number (int): row number from 1
"""
    def __init__(self, cells, *, schema, field_positions, row_position, row_number):
        # Positions must line up either with the raw cells or with the schema
        # fields (rows can be short or long relative to the schema).
        assert len(field_positions) in (len(cells), len(schema.fields))
        # Set attributes
        fields = schema.fields
        self.__schema = schema
        self.__field_positions = field_positions
        self.__row_position = row_position
        self.__row_number = row_number
        self.__blank_cells = {}
        self.__error_cells = {}
        self.__errors = []
        # Extra cells
        if len(fields) < len(cells):
            iterator = cells[len(fields) :]
            start = max(field_positions[: len(fields)]) + 1
            # Trim the extras so later processing sees only schema-backed cells.
            del cells[len(fields) :]
            for field_position, cell in enumerate(iterator, start=start):
                self.__errors.append(
                    errors.ExtraCellError(
                        note="",
                        cells=list(map(str, cells)),
                        row_number=row_number,
                        row_position=row_position,
                        cell=str(cell),
                        field_name="",
                        field_number=len(fields) + field_position - start,
                        field_position=field_position,
                    )
                )
        # Missing cells
        if len(fields) > len(cells):
            start = len(cells) + 1
            iterator = zip_longest(field_positions[len(cells) :], fields[len(cells) :])
            for field_number, (field_position, field) in enumerate(iterator, start=start):
                if field is not None:
                    # Pad with None so every schema field ends up with a cell.
                    cells.append(None)
                    self.__errors.append(
                        errors.MissingCellError(
                            note="",
                            cells=list(map(str, cells)),
                            row_number=row_number,
                            row_position=row_position,
                            cell="",
                            field_name=field.name,
                            field_number=field_number,
                            field_position=field_position
                            or max(field_positions) + field_number - start + 1,
                        )
                    )
        # Iterate items
        field_number = 0
        for field_position, field, source in zip(field_positions, fields, cells):
            field_number += 1
            # Read cell
            target, notes = field.read_cell(source)
            type_note = notes.pop("type", None) if notes else None
            if target is None and not type_note:
                # Parsed to None without a type error -> genuinely blank cell.
                self.__blank_cells[field.name] = source
            self[field.name] = target
            # Type error
            if type_note:
                self.__error_cells[field.name] = source
                self.__errors.append(
                    errors.TypeError(
                        note=type_note,
                        cells=list(map(str, cells)),
                        row_number=row_number,
                        row_position=row_position,
                        cell=str(source),
                        field_name=field.name,
                        field_number=field_number,
                        field_position=field_position,
                    )
                )
            # Constraint errors
            if notes:
                for note in notes.values():
                    self.__errors.append(
                        errors.ConstraintError(
                            note=note,
                            cells=list(map(str, cells)),
                            row_number=row_number,
                            row_position=row_position,
                            cell=str(source),
                            field_name=field.name,
                            field_number=field_number,
                            field_position=field_position,
                        )
                    )
        # Blank row
        if len(self) == len(self.__blank_cells):
            # Every cell blank: collapse all errors into a single BlankRowError.
            self.__errors = [
                errors.BlankRowError(
                    note="",
                    cells=list(map(str, cells)),
                    row_number=row_number,
                    row_position=row_position,
                )
            ]
@cached_property
def schema(self):
"""
Returns:
Schema: table schema
"""
return self.__schema
@cached_property
def field_positions(self):
"""
Returns:
int[]: table field positions
"""
return self.__field_positions
@cached_property
def row_position(self):
"""
Returns:
int: row position from 1
"""
return self.__row_position
@cached_property
def row_number(self):
"""
Returns:
int: row number from 1
"""
return self.__row_number
@cached_property
def blank_cells(self):
"""A mapping indexed by a field name with blank cells before parsing
Returns:
dict: row blank cells
"""
return self.__blank_cells
@cached_property
def error_cells(self):
"""A mapping indexed by a field name with error cells before parsing
Returns:
dict: row error cells
"""
return self.__error_cells
@cached_property
def errors(self):
"""
Returns:
Error[]: row errors
"""
return self.__errors
@cached_property
def valid(self):
"""
Returns:
bool: if row valid
"""
return not self.__errors
# Import/Export
    def to_str(self):
        """
        Returns:
            str: a row as a CSV string
        """
        cells = []
        for field in self.__schema.fields:
            if field.name in self:
                # Render each value through its field; write_cell notes are
                # intentionally ignored here.
                cell, notes = field.write_cell(self[field.name])
                cells.append(cell)
        return helpers.stringify_csv_string(cells)
    def to_dict(self, *, json=False):
        """
        Parameters:
            json (bool): make data types compatible with JSON format
        Returns:
            dict: a row as a dictionary
        """
        if json:
            result = {}
            for field in self.__schema.fields:
                if field.name in self:
                    cell = self[field.name]
                    if field.type not in JsonParser.native_types:
                        # Serialize non-JSON-native types back to string form.
                        cell, notes = field.write_cell(cell, ignore_missing=True)
                    if isinstance(cell, Decimal):
                        # JSON has no Decimal; degrade to float.
                        cell = float(cell)
                    result[field.name] = cell
            return result
        return dict(self)
    def to_list(self, *, json=False):
        """
        Parameters:
            json (bool): make data types compatible with JSON format
        Returns:
            dict: a row as a list
        """
        if json:
            result = []
            for field in self.__schema.fields:
                if field.name in self:
                    cell = self[field.name]
                    if field.type not in JsonParser.native_types:
                        # Serialize non-JSON-native types back to string form.
                        cell, notes = field.write_cell(cell, ignore_missing=True)
                    if isinstance(cell, Decimal):
                        # JSON has no Decimal; degrade to float.
                        cell = float(cell)
                    result.append(cell)
            return result
        return list(self.values())
|
en
| 0.590688
|
# TODO: review this dependency # TODO: rebase on list base class for permormance? # TODO: if not list - drop OrderedDict? From Python3.7 order is guaranteed Row representation API | Usage -------- | -------- Public | `from frictionless import Table` This object is returned by `extract`, `table.read_rows`, and other functions. ```python rows = extract("data/table.csv") for row in rows: # work with the Row ``` Parameters: cells (any[]): array of cells schema (Schema): table schema field_positions (int[]): table field positions row_position (int): row position from 1 row_number (int): row number from 1 # Set attributes # Extra cells # Missing cells # Iterate items # Read cell # Type error # Constraint errors # Blank row Returns: Schema: table schema Returns: int[]: table field positions Returns: int: row position from 1 Returns: int: row number from 1 A mapping indexed by a field name with blank cells before parsing Returns: dict: row blank cells A mapping indexed by a field name with error cells before parsing Returns: dict: row error cells Returns: Error[]: row errors Returns: bool: if row valid # Import/Export Returns: str: a row as a CSV string Parameters: json (bool): make data types compatible with JSON format Returns: dict: a row as a dictionary Parameters: json (bool): make data types compatible with JSON format Returns: dict: a row as a list
| 2.554759
| 3
|
datacoco_batch/__init__.py
|
equinoxfitness/datacoco-batch
| 3
|
6625619
|
from .batch import Batch
|
from .batch import Batch
|
none
| 1
| 1.058388
| 1
|
|
common/libs/UrlManager.py
|
Mrlhz/flask
| 0
|
6625620
|
class UrlManager(object):
    """Helpers for building application and static-resource URLs."""

    @staticmethod
    def build_url(path):
        """Return the URL for ``path`` (currently the identity mapping)."""
        return path

    @staticmethod
    def build_static_url(path):
        """Append the cache-busting version query string to a static path."""
        versioned = "{}?ver={}".format(path, "201808281000")
        return UrlManager.build_url(versioned)
|
class UrlManager(object):
    """Helpers for building application and static-resource URLs."""

    @staticmethod
    def build_url(path):
        """Return the URL for ``path`` (currently the identity mapping)."""
        return path

    @staticmethod
    def build_static_url(path):
        """Append the cache-busting version query string to a static path."""
        versioned = "{}?ver={}".format(path, "201808281000")
        return UrlManager.build_url(versioned)
|
none
| 1
| 2.266665
| 2
|
|
Tuplas, Listas, Dicio/ex88.py
|
viniTWL/Python-Projects
| 1
|
6625621
|
from random import sample
from time import sleep
# Mega Sena lottery generator: asks how many games to create, then prints each
# game as a sorted draw of 6 distinct numbers, pausing 2s between draws.
lista = []
print('\033[0;34m-'*30)
print(' \033[0;34mJOGOS DA MEGA SENA')
print('\033[0;34m-\033[m'*30)
j = int(input('Quantos jogos você deseja gerar? '))
print('SORTEANDO...')
for i in range(0, j):
    # sample() draws 6 distinct numbers from 1..59 (range upper bound is
    # exclusive) — NOTE(review): a real Mega Sena draw goes up to 60; confirm
    # whether range(1, 61) was intended.
    ran = sorted(sample(range(1, 60), 6))
    lista.append(ran[:])
    sleep(2)
    print(f'Jogo {i+1}:{lista[i]}')
|
from random import sample
from time import sleep
# Mega Sena lottery generator: asks how many games to create, then prints each
# game as a sorted draw of 6 distinct numbers, pausing 2s between draws.
lista = []
print('\033[0;34m-'*30)
print(' \033[0;34mJOGOS DA MEGA SENA')
print('\033[0;34m-\033[m'*30)
j = int(input('Quantos jogos você deseja gerar? '))
print('SORTEANDO...')
for i in range(0, j):
    # sample() draws 6 distinct numbers from 1..59 (range upper bound is
    # exclusive) — NOTE(review): a real Mega Sena draw goes up to 60; confirm
    # whether range(1, 61) was intended.
    ran = sorted(sample(range(1, 60), 6))
    lista.append(ran[:])
    sleep(2)
    print(f'Jogo {i+1}:{lista[i]}')
|
none
| 1
| 3.016613
| 3
|
|
original/v1/s005-modules/piskvorky1d/piskvorky.py
|
MartinaHytychova/pyladies.cz
| 69
|
6625622
|
<reponame>MartinaHytychova/pyladies.cz<filename>original/v1/s005-modules/piskvorky1d/piskvorky.py<gh_stars>10-100
def tah(pole, index, symbol):
    """Place ``symbol`` at position ``index`` of the board string ``pole``.

    Returns a new board string; raises ValueError when the cell is occupied.
    """
    occupied = pole[index] != '-'
    if occupied:
        raise ValueError('Pole {} je obsazeno symbolem {}'.format(index, pole[index]))
    prefix, suffix = pole[:index], pole[index + 1:]
    return prefix + symbol + suffix
def vyhodnot(pole):
    """Evaluate a 1-D tic-tac-toe board.

    Returns the winning symbol ('o' checked before 'x'), '!' for a full board
    with no winner (draw), or '-' when the game is still open.
    """
    for symbol in ('o', 'x'):
        if symbol * 3 in pole:
            return symbol
    if '-' not in pole:
        return '!'
    return '-'
|
def tah(pole, index, symbol):
    """Place ``symbol`` at position ``index`` of the board string ``pole``.

    Returns a new board string; raises ValueError when the cell is occupied.
    """
    occupied = pole[index] != '-'
    if occupied:
        raise ValueError('Pole {} je obsazeno symbolem {}'.format(index, pole[index]))
    prefix, suffix = pole[:index], pole[index + 1:]
    return prefix + symbol + suffix
def vyhodnot(pole):
    """Evaluate a 1-D tic-tac-toe board.

    Returns the winning symbol ('o' checked before 'x'), '!' for a full board
    with no winner (draw), or '-' when the game is still open.
    """
    for symbol in ('o', 'x'):
        if symbol * 3 in pole:
            return symbol
    if '-' not in pole:
        return '!'
    return '-'
|
none
| 1
| 3.667847
| 4
|
|
tarakania_rpg/rpg/items/consumables/__init__.py
|
MKokyn/discord-bot
| 0
|
6625623
|
from .consumable import Consumable
from .food import Food
from .potion import Potion
__all__ = ("Consumable", "Food", "Potion")
|
from .consumable import Consumable
from .food import Food
from .potion import Potion
__all__ = ("Consumable", "Food", "Potion")
|
none
| 1
| 1.550499
| 2
|
|
Tests/scripts/hook_validations/secrets.py
|
dimalz/content
| 0
|
6625624
|
import io
import os
import re
import math
import json
import string
from Tests.test_utils import run_command, print_error
from Tests.scripts.constants import *
# secrets settings
# Entropy score is determined by shanon's entropy algorithm, most English words will score between 1.5 and 3.5
ENTROPY_THRESHOLD = 4.2
SKIPPED_FILES = {'secrets_white_list', 'id_set.json', 'conf.json', 'Pipfile'}
ACCEPTED_FILE_STATUSES = ['M', 'A', "R099"]
TEXT_FILE_TYPES = {'.yml', '.py', '.json', '.md', '.txt', '.sh', '.ini', '.eml', '', '.csv', '.js', '.pdf', '.html'}
SKIP_FILE_TYPE_ENTROPY_CHECKS = {'.eml'}
SKIP_DEMISTO_TYPE_ENTROPY_CHECKS = {'playbook-'}
# disable-secrets-detection-start
# secrets
URLS_REGEX = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+'
EMAIL_REGEX = r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+'
IPV6_REGEX = r'(?:(?:[0-9A-Fa-f]{1,4}:){6}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1' \
r'[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|::' \
r'(?:[0-9A-Fa-f]{1,4}:){5}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]' \
r'{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|' \
r'(?:[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){4}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|' \
r'(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}' \
r'|2[0-4][0-9]|25[0-5]))|(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){3}' \
r'(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])' \
r'\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,2}' \
r'[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){2}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|' \
r'(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}' \
r'|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,3}[0-9A-Fa-f]{1,4})?::[0-9A-Fa-f]{1,4}:' \
r'(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4]' \
r'[0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]' \
r'{1,4}:){,4}[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]' \
r'|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|' \
r'(?:(?:[0-9A-Fa-f]{1,4}:){,5}[0-9A-Fa-f]{1,4})?::[0-9A-Fa-f]{1,4}|' \
r'(?:(?:[0-9A-Fa-f]{1,4}:){,6}[0-9A-Fa-f]{1,4})?::)'
IPV4_REGEX = r'\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b'
DATES_REGEX = r'((\d{4}[/.-]\d{2}[/.-]\d{2})[T\s](\d{2}:?\d{2}:?\d{2}:?(\.\d{5,10})?([+-]\d{2}:?\d{2})?Z?)?)'
# false positives
UUID_REGEX = r'([\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{8,12})'
# disable-secrets-detection-end
def get_secrets(branch_name, is_circle):
    """Scan the branch's changed files for potential secrets and report them.

    :param branch_name: current branch being worked on
    :param is_circle: True when running in CircleCI (changes the advice text only)
    :return: dict mapping file name to the list of suspected secret strings
    """
    secrets_found = {}
    secrets_found_string = ''
    # Only scan when no merge is in progress: run_command returns empty
    # output when MERGE_HEAD does not resolve, which makes this condition true.
    if not run_command('git rev-parse -q --verify MERGE_HEAD'):
        secrets_file_paths = get_all_diff_text_files(branch_name, is_circle)
        secrets_found = search_potential_secrets(secrets_file_paths)
        if secrets_found:
            secrets_found_string += 'Secrets were found in the following files:\n'
            for file_name in secrets_found:
                secrets_found_string += ('\nFile Name: ' + file_name)
                secrets_found_string += json.dumps(secrets_found[file_name], indent=4)
            # Local runs can still be fixed before commit; CI means the secret
            # already reached the public repository.
            if not is_circle:
                secrets_found_string += 'Remove or whitelist secrets in order to proceed, then re-commit\n'
            else:
                secrets_found_string += 'The secrets were exposed in public repository,' \
                                        ' remove the files asap and report it.\n'
            secrets_found_string += 'For more information about whitelisting please visit: ' \
                                    'https://github.com/demisto/internal-content/tree/master/documentation/secrets'
    if secrets_found:
        print_error(secrets_found_string)
    return secrets_found
def get_all_diff_text_files(branch_name, is_circle):
    """
    Get all new/modified text files that need to be searched for secrets
    :param branch_name: current branch being worked on
    :param is_circle: boolean to check if being ran from circle
    :return: list: list of text files
    """
    # CI diffs against origin/master; local runs diff the working tree's HEAD.
    if is_circle:
        diff_output = run_command("git diff --name-status origin/master...{}".format(branch_name))
    else:
        diff_output = run_command("git diff --name-status --no-merges HEAD")
    return get_diff_text_files(diff_output)
def get_diff_text_files(files_string):
    """Filter out only added/modified text files from git diff
    :param files_string: string representing the git diff files
    :return: text_files_list: set of full paths to text files
    """
    text_files_list = set()
    for diff_line in files_string.split('\n'):
        parts = diff_line.split()
        if not parts:
            continue
        status = parts[0].upper()
        # Renames look like "R099 old/path new/path" - the updated file name
        # is the last field; all other accepted statuses carry one path.
        file_path = parts[2] if status == "R099" else parts[1]
        # Keep only added/modified, text-readable files, excluding the
        # whitelist/config files themselves.
        if status not in ACCEPTED_FILE_STATUSES or not is_text_file(file_path):
            continue
        if any(skipped_file in file_path for skipped_file in SKIPPED_FILES):
            continue
        text_files_list.add(file_path)
    return text_files_list
def is_text_file(file_path):
    """Return True when the file's extension marks it as a readable text file."""
    return os.path.splitext(file_path)[1] in TEXT_FILE_TYPES
def search_potential_secrets(secrets_file_paths):
    """Returns potential secrets(sensitive data) found in committed and added files
    :param secrets_file_paths: paths of files that are being commited to git repo
    :return: dictionary(filename: (list)secrets) of strings sorted by file name for secrets found in files
    """
    secrets_found = {}
    # Get generic white list set (module-level config + IOC + whole-file lists)
    conf_secrets_white_list, ioc_white_list, files_white_list = get_white_list()
    for file_path in secrets_file_paths:
        if file_path in files_white_list:
            print("Skipping secrets detection for file: {} as it is white listed".format(file_path))
            continue
        file_name = os.path.basename(file_path)
        high_entropy_strings = []
        secrets_found_with_regex = []
        yml_file_contents = None
        _, file_extension = os.path.splitext(file_path)
        skip_secrets = False
        # per-file copy: temp entries below must not leak into the next file
        secrets_white_list = set(conf_secrets_white_list)
        # get file contents
        file_contents = get_file_contents(file_path, file_extension)
        # Validate if it is integration documentation file
        # (INTEGRATION_README_REGEX comes from Tests.scripts.constants)
        integration_readme = re.match(pattern=INTEGRATION_README_REGEX,
                                      string=file_path,
                                      flags=re.IGNORECASE)
        # if py/js file, search for yml in order to retrieve temp white list
        if file_extension in {'.py', '.js'} or integration_readme:
            yml_file_contents = retrieve_related_yml(os.path.dirname(file_path))
        # Add all context output paths keywords to whitelist temporary
        if file_extension == '.yml' or yml_file_contents:
            temp_white_list = create_temp_white_list(yml_file_contents if yml_file_contents else file_contents)
            secrets_white_list = secrets_white_list.union(temp_white_list)
        # Search by lines after strings with high entropy as possibly suspicious
        for line in file_contents.split('\n'):
            # if detected disable-secrets comment, skip the line
            skip_secrets = is_secrets_disabled(line, skip_secrets)
            if skip_secrets:
                continue
            # REGEX scanning for IOCs and false positive groups
            regex_secrets, false_positives = regex_for_secrets(line)
            for regex_secret in regex_secrets:
                if not any(ioc.lower() in regex_secret.lower() for ioc in ioc_white_list):
                    secrets_found_with_regex.append(regex_secret)
            # added false positives into white list array before testing the strings in line
            secrets_white_list = secrets_white_list.union(false_positives)
            # due to nature of eml files, skip string by string secret detection - only regex
            if file_extension in SKIP_FILE_TYPE_ENTROPY_CHECKS or \
                    any(demisto_type in file_name for demisto_type in SKIP_DEMISTO_TYPE_ENTROPY_CHECKS):
                continue
            line = remove_false_positives(line)
            # calculate entropy for each string in the file
            for string_ in line.split():
                # compare the lower case of the string against both generic whitelist & temp white list
                if not any(white_list_string.lower() in string_.lower() for white_list_string in secrets_white_list):
                    entropy = calculate_shannon_entropy(string_)
                    if entropy >= ENTROPY_THRESHOLD:
                        high_entropy_strings.append(string_)
        if high_entropy_strings or secrets_found_with_regex:
            # uniquify identical matches between lists
            file_secrets = list(set(high_entropy_strings + secrets_found_with_regex))
            secrets_found[file_name] = file_secrets
    return secrets_found
def create_temp_white_list(file_contents):
    """Build a temporary whitelist from contextPath output keys in YML content.

    Every dotted-path segment longer than 4 characters is whitelisted
    (lower-cased), so documented output keys are not flagged as secrets.
    """
    paths = re.findall(r'contextPath: (\S+\.+\S+)', file_contents)
    return {
        segment.lower()
        for path in paths
        for segment in path.split('.')
        if len(segment) > 4
    }
def retrieve_related_yml(integration_path):
    """Return the contents of the integration's YML file, or None if absent.

    The YML is expected at <integration_path>/<basename(integration_path)>.yml.
    """
    yml_file = os.path.join(integration_path, os.path.basename(integration_path) + '.yml')
    if not os.path.exists(yml_file):
        return None
    with io.open('./' + yml_file, mode="r", encoding="utf-8") as matching_yml_file:
        return matching_yml_file.read()
def regex_for_secrets(line):
    """Scans for IOCs with potentially low entropy score
    :param line: line to test as string representation (string)
    :return potential_secrets (list) IOCs found via regex, false_positives (list) Non secrets with high entropy
    """
    potential_secrets = []
    false_positives = []
    # Dates REGEX for false positive preventing since they have high entropy
    dates = re.findall(DATES_REGEX, line)
    if dates:
        false_positives += [date[0].lower() for date in dates]
    # UUID REGEX
    uuids = re.findall(UUID_REGEX, line)
    if uuids:
        false_positives += uuids
    # docker images version are detected as ips. so we ignore and whitelist them
    # example: dockerimage: demisto/duoadmin:172.16.17.32
    # NOTE(review): the '.' between version digits is unescaped, so this also
    # matches separators other than dots - probably meant r'\.'; confirm.
    re_res = re.search(r'dockerimage:\s*\w*demisto/\w+:(\d+.\d+.\d+.\d+)', line)
    if re_res:
        docker_version = re_res.group(1)
        false_positives.append(docker_version)
        # strip the version so the IP regexes below do not re-detect it
        line = line.replace(docker_version, '')
    # URL REGEX
    urls = re.findall(URLS_REGEX, line)
    if urls:
        potential_secrets += urls
    # EMAIL REGEX
    emails = re.findall(EMAIL_REGEX, line)
    if emails:
        potential_secrets += emails
    # IPV6 REGEX
    ipv6_list = re.findall(IPV6_REGEX, line)
    if ipv6_list:
        for ipv6 in ipv6_list:
            # drop the empty '::' address and trivially short matches
            if ipv6 != '::' and len(ipv6) > 4:
                potential_secrets.append(ipv6)
    # IPV4 REGEX
    ipv4_list = re.findall(IPV4_REGEX, line)
    if ipv4_list:
        potential_secrets += ipv4_list
    return potential_secrets, false_positives
def calculate_shannon_entropy(data):
    """Algorithm to determine the randomness of a given data.

    Higher is more random/complex; most English words average around 3.
    Only printable ASCII characters contribute probability mass, and the
    probabilities are normalized by the full length of *data* - both exactly
    as the original per-character scan behaved.

    :param data: string to score.
    :return: entropy score (float; 0 for empty input).
    """
    from collections import Counter
    if not data:
        return 0
    printable = set(string.printable)
    # Single O(n) pass over the data instead of one data.count() call per
    # printable character (which was O(100 * n) per scanned string).
    counts = Counter(ch for ch in data if ch in printable)
    total = len(data)
    entropy = 0
    for occurrences in counts.values():
        # probability of event X; float() guards against integer division
        px = float(occurrences) / total
        # the information carried by every possible outcome, in bits
        entropy += - px * math.log(px, 2)
    return entropy
def get_white_list():
    """Load ./Tests/secrets_white_list.json and build the whitelist sets.

    :return: tuple of (generic whitelist, IOC whitelist, whitelisted file
        paths), each as a set. String entries of 4 characters or fewer are
        dropped to avoid over-matching.
    """
    with io.open('./Tests/secrets_white_list.json', mode="r", encoding="utf-8") as secrets_white_list_file:
        final_white_list = []
        ioc_white_list = []
        files_white_list = []
        white_list_conf = json.load(secrets_white_list_file)
        # BUGFIX: dict.iteritems() is Python 2 only and raises AttributeError
        # on Python 3; items() behaves identically here.
        for name, white_list in white_list_conf.items():
            if name == 'iocs':
                # 'iocs' maps category names to lists of IOC strings
                for sublist in white_list:
                    ioc_white_list += [white_item for white_item in white_list[sublist] if len(white_item) > 4]
                final_white_list += ioc_white_list
            elif name == 'files':
                files_white_list = white_list
            else:
                final_white_list += [white_item for white_item in white_list if len(white_item) > 4]
        return set(final_white_list), set(ioc_white_list), set(files_white_list)
def get_file_contents(file_path, file_extension):
    """Read and return the contents of the file at *file_path*.

    PDFs are parsed to text; everything else is read as UTF-8 text (decoding
    errors ignored). Long base64 runs are stripped before returning.

    :param file_path: path relative to the repository root.
    :param file_extension: the file's extension, including the leading dot.
    :raises: re-raises whatever the read/parse failed with, after logging.
    """
    try:
        # if pdf file, parse text
        if file_extension == '.pdf':
            file_contents = extract_text_from_pdf(file_path)
        else:
            # Open each file, read its contents in UTF-8 encoding to avoid unicode characters
            with io.open('./' + file_path, mode="r", encoding="utf-8", errors='ignore') as commited_file:
                file_contents = commited_file.read()
        file_contents = ignore_base64(file_contents)
        return file_contents
    except Exception as ex:
        print("Failed opening file: {}. Exception: {}".format(file_path, ex))
        raise
def extract_text_from_pdf(file_path):
    """Extract and return the plain text of every page of a PDF file.

    Returns an empty string when the PDF cannot be parsed.

    NOTE(review): PyPDF2 is referenced here but never imported in this
    module - calling this raises NameError at runtime; add `import PyPDF2`.
    """
    page_num = 0
    file_contents = ''
    try:
        # NOTE(review): pdf_file_obj is never closed - consider a `with` block.
        pdf_file_obj = open('./' + file_path, 'rb')
        pdf_reader = PyPDF2.PdfFileReader(pdf_file_obj)
        num_pages = pdf_reader.numPages
    except PyPDF2.utils.PdfReadError:
        print('ERROR: Could not parse PDF file in path: {} - ***Review Manually***'.format(file_path))
        return file_contents
    while page_num < num_pages:
        pdf_page = pdf_reader.getPage(page_num)
        page_num += 1
        file_contents += pdf_page.extractText()
    return file_contents
def remove_false_positives(line):
    """Strip the first bracketed token from *line*.

    Parenthesized/bracketed spans (code snippets, markdown links, etc.) score
    high on entropy but are almost never secrets, so they are removed before
    the per-string entropy scan.
    """
    # BUGFIX: raw string - '\s' in a plain literal is an invalid escape
    # sequence (DeprecationWarning today, SyntaxError in future Pythons).
    false_positive = re.search(r'([^\s]*[(\[{].*[)\]}][^\s]*)', line)
    if false_positive:
        line = line.replace(false_positive.group(1), '')
    return line
def is_secrets_disabled(line, skip_secrets):
    """Update the skip flag based on disable-secrets-detection markers in *line*.

    '-start' turns skipping on, '-end' turns it off, and the bare marker
    turns it on; lines without a marker leave the flag unchanged.

    :param line: current line of the file being scanned.
    :param skip_secrets: flag carried over from the previous line.
    :return: the updated flag.
    """
    # BUGFIX: test the suffixed markers first. The bare
    # 'disable-secrets-detection' pattern is a substring of both '-start' and
    # '-end', so checking it first made the '-end' marker *enable* skipping,
    # and a skipped region could never be turned off again.
    if re.search(r'disable-secrets-detection-end', line):
        skip_secrets = False
    elif re.search(r'disable-secrets-detection-start', line):
        skip_secrets = True
    elif re.search(r'disable-secrets-detection', line):
        skip_secrets = True
    return skip_secrets
def ignore_base64(file_contents):
    """Blank out very long base64-looking runs (over 500 chars) from the text.

    Long base64 blobs (embedded images, attachments) are high-entropy but are
    not secrets, so they are dropped before the entropy scan.
    """
    b64_pattern = (r'(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|'
                   r'[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{4})')
    for candidate in re.findall(b64_pattern, file_contents):
        if len(candidate) > 500:
            file_contents = file_contents.replace(candidate, '')
    return file_contents
|
import io
import os
import re
import math
import json
import string
from Tests.test_utils import run_command, print_error
from Tests.scripts.constants import *
# secrets settings
# Entropy score is determined by shanon's entropy algorithm, most English words will score between 1.5 and 3.5
ENTROPY_THRESHOLD = 4.2
SKIPPED_FILES = {'secrets_white_list', 'id_set.json', 'conf.json', 'Pipfile'}
ACCEPTED_FILE_STATUSES = ['M', 'A', "R099"]
TEXT_FILE_TYPES = {'.yml', '.py', '.json', '.md', '.txt', '.sh', '.ini', '.eml', '', '.csv', '.js', '.pdf', '.html'}
SKIP_FILE_TYPE_ENTROPY_CHECKS = {'.eml'}
SKIP_DEMISTO_TYPE_ENTROPY_CHECKS = {'playbook-'}
# disable-secrets-detection-start
# secrets
URLS_REGEX = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+'
EMAIL_REGEX = r'[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+'
IPV6_REGEX = r'(?:(?:[0-9A-Fa-f]{1,4}:){6}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1' \
r'[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|::' \
r'(?:[0-9A-Fa-f]{1,4}:){5}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]' \
r'{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|' \
r'(?:[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){4}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|' \
r'(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}' \
r'|2[0-4][0-9]|25[0-5]))|(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){3}' \
r'(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])' \
r'\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,2}' \
r'[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:){2}(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|' \
r'(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}' \
r'|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]{1,4}:){,3}[0-9A-Fa-f]{1,4})?::[0-9A-Fa-f]{1,4}:' \
r'(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4]' \
r'[0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|(?:(?:[0-9A-Fa-f]' \
r'{1,4}:){,4}[0-9A-Fa-f]{1,4})?::(?:[0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{1,4}|(?:(?:[0-9]|[1-9][0-9]' \
r'|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}(?:[0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]))|' \
r'(?:(?:[0-9A-Fa-f]{1,4}:){,5}[0-9A-Fa-f]{1,4})?::[0-9A-Fa-f]{1,4}|' \
r'(?:(?:[0-9A-Fa-f]{1,4}:){,6}[0-9A-Fa-f]{1,4})?::)'
IPV4_REGEX = r'\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b'
DATES_REGEX = r'((\d{4}[/.-]\d{2}[/.-]\d{2})[T\s](\d{2}:?\d{2}:?\d{2}:?(\.\d{5,10})?([+-]\d{2}:?\d{2})?Z?)?)'
# false positives
UUID_REGEX = r'([\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{8,12})'
# disable-secrets-detection-end
def get_secrets(branch_name, is_circle):
secrets_found = {}
secrets_found_string = ''
if not run_command('git rev-parse -q --verify MERGE_HEAD'):
secrets_file_paths = get_all_diff_text_files(branch_name, is_circle)
secrets_found = search_potential_secrets(secrets_file_paths)
if secrets_found:
secrets_found_string += 'Secrets were found in the following files:\n'
for file_name in secrets_found:
secrets_found_string += ('\nFile Name: ' + file_name)
secrets_found_string += json.dumps(secrets_found[file_name], indent=4)
if not is_circle:
secrets_found_string += 'Remove or whitelist secrets in order to proceed, then re-commit\n'
else:
secrets_found_string += 'The secrets were exposed in public repository,' \
' remove the files asap and report it.\n'
secrets_found_string += 'For more information about whitelisting please visit: ' \
'https://github.com/demisto/internal-content/tree/master/documentation/secrets'
if secrets_found:
print_error(secrets_found_string)
return secrets_found
def get_all_diff_text_files(branch_name, is_circle):
"""
Get all new/modified text files that need to be searched for secrets
:param branch_name: current branch being worked on
:param is_circle: boolean to check if being ran from circle
:return: list: list of text files
"""
if is_circle:
branch_changed_files_string = \
run_command("git diff --name-status origin/master...{}".format(branch_name))
text_files_list = get_diff_text_files(branch_changed_files_string)
else:
local_changed_files_string = run_command("git diff --name-status --no-merges HEAD")
text_files_list = get_diff_text_files(local_changed_files_string)
return text_files_list
def get_diff_text_files(files_string):
"""Filter out only added/modified text files from git diff
:param files_string: string representing the git diff files
:return: text_files_list: string of full path to text files
"""
# file statuses to filter from the diff, no need to test deleted files.
all_files = files_string.split('\n')
text_files_list = set()
for file_name in all_files:
file_data = file_name.split()
if not file_data:
continue
file_status = file_data[0]
if file_status.upper() == "R099":
# if filename renamed
# sometimes the R comes with numbers R099,
# the R status file usually will look like:
# R099 TestsPlaybooks/foo.yml TestPlaybooks/playbook-foo.yml
# that is why we set index 2 to file_path - the second index is the updated file name
file_path = file_data[2]
else:
file_path = file_data[1]
# only modified/added file, text readable, exclude white_list file
if file_status.upper() in ACCEPTED_FILE_STATUSES and is_text_file(file_path):
if not any(skipped_file in file_path for skipped_file in SKIPPED_FILES):
text_files_list.add(file_path)
return text_files_list
def is_text_file(file_path):
file_extension = os.path.splitext(file_path)[1]
if file_extension in TEXT_FILE_TYPES:
return True
return False
def search_potential_secrets(secrets_file_paths):
"""Returns potential secrets(sensitive data) found in committed and added files
:param secrets_file_paths: paths of files that are being commited to git repo
:return: dictionary(filename: (list)secrets) of strings sorted by file name for secrets found in files
"""
secrets_found = {}
# Get generic white list set
conf_secrets_white_list, ioc_white_list, files_white_list = get_white_list()
for file_path in secrets_file_paths:
if file_path in files_white_list:
print("Skipping secrets detection for file: {} as it is white listed".format(file_path))
continue
file_name = os.path.basename(file_path)
high_entropy_strings = []
secrets_found_with_regex = []
yml_file_contents = None
_, file_extension = os.path.splitext(file_path)
skip_secrets = False
secrets_white_list = set(conf_secrets_white_list)
# get file contents
file_contents = get_file_contents(file_path, file_extension)
# Validate if it is integration documentation file
integration_readme = re.match(pattern=INTEGRATION_README_REGEX,
string=file_path,
flags=re.IGNORECASE)
# if py/js file, search for yml in order to retrieve temp white list
if file_extension in {'.py', '.js'} or integration_readme:
yml_file_contents = retrieve_related_yml(os.path.dirname(file_path))
# Add all context output paths keywords to whitelist temporary
if file_extension == '.yml' or yml_file_contents:
temp_white_list = create_temp_white_list(yml_file_contents if yml_file_contents else file_contents)
secrets_white_list = secrets_white_list.union(temp_white_list)
# Search by lines after strings with high entropy as possibly suspicious
for line in file_contents.split('\n'):
# if detected disable-secrets comment, skip the line
skip_secrets = is_secrets_disabled(line, skip_secrets)
if skip_secrets:
continue
# REGEX scanning for IOCs and false positive groups
regex_secrets, false_positives = regex_for_secrets(line)
for regex_secret in regex_secrets:
if not any(ioc.lower() in regex_secret.lower() for ioc in ioc_white_list):
secrets_found_with_regex.append(regex_secret)
# added false positives into white list array before testing the strings in line
secrets_white_list = secrets_white_list.union(false_positives)
# due to nature of eml files, skip string by string secret detection - only regex
if file_extension in SKIP_FILE_TYPE_ENTROPY_CHECKS or \
any(demisto_type in file_name for demisto_type in SKIP_DEMISTO_TYPE_ENTROPY_CHECKS):
continue
line = remove_false_positives(line)
# calculate entropy for each string in the file
for string_ in line.split():
# compare the lower case of the string against both generic whitelist & temp white list
if not any(white_list_string.lower() in string_.lower() for white_list_string in secrets_white_list):
entropy = calculate_shannon_entropy(string_)
if entropy >= ENTROPY_THRESHOLD:
high_entropy_strings.append(string_)
if high_entropy_strings or secrets_found_with_regex:
# uniquify identical matches between lists
file_secrets = list(set(high_entropy_strings + secrets_found_with_regex))
secrets_found[file_name] = file_secrets
return secrets_found
def create_temp_white_list(file_contents):
temp_white_list = set()
context_paths = re.findall(r'contextPath: (\S+\.+\S+)', file_contents)
for context_path in context_paths:
context_path = context_path.split('.')
context_path = [white_item.lower() for white_item in context_path if len(white_item) > 4]
temp_white_list = temp_white_list.union(context_path)
return temp_white_list
def retrieve_related_yml(integration_path):
matching_yml_file_contents = None
yml_file = os.path.join(integration_path, os.path.basename(integration_path) + '.yml')
if os.path.exists(yml_file):
with io.open('./' + yml_file, mode="r", encoding="utf-8") as matching_yml_file:
matching_yml_file_contents = matching_yml_file.read()
return matching_yml_file_contents
def regex_for_secrets(line):
"""Scans for IOCs with potentially low entropy score
:param line: line to test as string representation (string)
:return potential_secrets (list) IOCs found via regex, false_positives (list) Non secrets with high entropy
"""
potential_secrets = []
false_positives = []
# Dates REGEX for false positive preventing since they have high entropy
dates = re.findall(DATES_REGEX, line)
if dates:
false_positives += [date[0].lower() for date in dates]
# UUID REGEX
uuids = re.findall(UUID_REGEX, line)
if uuids:
false_positives += uuids
# docker images version are detected as ips. so we ignore and whitelist them
# example: dockerimage: demisto/duoadmin:172.16.17.32
re_res = re.search(r'dockerimage:\s*\w*demisto/\w+:(\d+.\d+.\d+.\d+)', line)
if re_res:
docker_version = re_res.group(1)
false_positives.append(docker_version)
line = line.replace(docker_version, '')
# URL REGEX
urls = re.findall(URLS_REGEX, line)
if urls:
potential_secrets += urls
# EMAIL REGEX
emails = re.findall(EMAIL_REGEX, line)
if emails:
potential_secrets += emails
# IPV6 REGEX
ipv6_list = re.findall(IPV6_REGEX, line)
if ipv6_list:
for ipv6 in ipv6_list:
if ipv6 != '::' and len(ipv6) > 4:
potential_secrets.append(ipv6)
# IPV4 REGEX
ipv4_list = re.findall(IPV4_REGEX, line)
if ipv4_list:
potential_secrets += ipv4_list
return potential_secrets, false_positives
def calculate_shannon_entropy(data):
    """Algorithm to determine the randomness of a given data.

    Higher is more random/complex; most English words average around 3.
    Only printable ASCII characters contribute probability mass, and the
    probabilities are normalized by the full length of *data* - both exactly
    as the original per-character scan behaved.

    :param data: string to score.
    :return: entropy score (float; 0 for empty input).
    """
    from collections import Counter
    if not data:
        return 0
    printable = set(string.printable)
    # Single O(n) pass over the data instead of one data.count() call per
    # printable character (which was O(100 * n) per scanned string).
    counts = Counter(ch for ch in data if ch in printable)
    total = len(data)
    entropy = 0
    for occurrences in counts.values():
        # probability of event X; float() guards against integer division
        px = float(occurrences) / total
        # the information carried by every possible outcome, in bits
        entropy += - px * math.log(px, 2)
    return entropy
def get_white_list():
    """Load ./Tests/secrets_white_list.json and build the whitelist sets.

    :return: tuple of (generic whitelist, IOC whitelist, whitelisted file
        paths), each as a set. String entries of 4 characters or fewer are
        dropped to avoid over-matching.
    """
    with io.open('./Tests/secrets_white_list.json', mode="r", encoding="utf-8") as secrets_white_list_file:
        final_white_list = []
        ioc_white_list = []
        files_white_list = []
        white_list_conf = json.load(secrets_white_list_file)
        # BUGFIX: dict.iteritems() is Python 2 only and raises AttributeError
        # on Python 3; items() behaves identically here.
        for name, white_list in white_list_conf.items():
            if name == 'iocs':
                # 'iocs' maps category names to lists of IOC strings
                for sublist in white_list:
                    ioc_white_list += [white_item for white_item in white_list[sublist] if len(white_item) > 4]
                final_white_list += ioc_white_list
            elif name == 'files':
                files_white_list = white_list
            else:
                final_white_list += [white_item for white_item in white_list if len(white_item) > 4]
        return set(final_white_list), set(ioc_white_list), set(files_white_list)
def get_file_contents(file_path, file_extension):
try:
# if pdf file, parse text
if file_extension == '.pdf':
file_contents = extract_text_from_pdf(file_path)
else:
# Open each file, read its contents in UTF-8 encoding to avoid unicode characters
with io.open('./' + file_path, mode="r", encoding="utf-8", errors='ignore') as commited_file:
file_contents = commited_file.read()
file_contents = ignore_base64(file_contents)
return file_contents
except Exception as ex:
print("Failed opening file: {}. Exception: {}".format(file_path, ex))
raise
def extract_text_from_pdf(file_path):
page_num = 0
file_contents = ''
try:
pdf_file_obj = open('./' + file_path, 'rb')
pdf_reader = PyPDF2.PdfFileReader(pdf_file_obj)
num_pages = pdf_reader.numPages
except PyPDF2.utils.PdfReadError:
print('ERROR: Could not parse PDF file in path: {} - ***Review Manually***'.format(file_path))
return file_contents
while page_num < num_pages:
pdf_page = pdf_reader.getPage(page_num)
page_num += 1
file_contents += pdf_page.extractText()
return file_contents
def remove_false_positives(line):
    """Strip the first bracketed token from *line*.

    Parenthesized/bracketed spans (code snippets, markdown links, etc.) score
    high on entropy but are almost never secrets, so they are removed before
    the per-string entropy scan.
    """
    # BUGFIX: raw string - '\s' in a plain literal is an invalid escape
    # sequence (DeprecationWarning today, SyntaxError in future Pythons).
    false_positive = re.search(r'([^\s]*[(\[{].*[)\]}][^\s]*)', line)
    if false_positive:
        line = line.replace(false_positive.group(1), '')
    return line
def is_secrets_disabled(line, skip_secrets):
    """Update the skip flag based on disable-secrets-detection markers in *line*.

    '-start' turns skipping on, '-end' turns it off, and the bare marker
    turns it on; lines without a marker leave the flag unchanged.

    :param line: current line of the file being scanned.
    :param skip_secrets: flag carried over from the previous line.
    :return: the updated flag.
    """
    # BUGFIX: test the suffixed markers first. The bare
    # 'disable-secrets-detection' pattern is a substring of both '-start' and
    # '-end', so checking it first made the '-end' marker *enable* skipping,
    # and a skipped region could never be turned off again.
    if re.search(r'disable-secrets-detection-end', line):
        skip_secrets = False
    elif re.search(r'disable-secrets-detection-start', line):
        skip_secrets = True
    elif re.search(r'disable-secrets-detection', line):
        skip_secrets = True
    return skip_secrets
def ignore_base64(file_contents):
base64_strings = re.findall(r'(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|'
r'[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{4})', file_contents)
for base64_string in base64_strings:
if len(base64_string) > 500:
file_contents = file_contents.replace(base64_string, '')
return file_contents
|
en
| 0.835256
|
# secrets settings # Entropy score is determined by shanon's entropy algorithm, most English words will score between 1.5 and 3.5 # disable-secrets-detection-start # secrets # false positives # disable-secrets-detection-end Get all new/modified text files that need to be searched for secrets :param branch_name: current branch being worked on :param is_circle: boolean to check if being ran from circle :return: list: list of text files Filter out only added/modified text files from git diff :param files_string: string representing the git diff files :return: text_files_list: string of full path to text files # file statuses to filter from the diff, no need to test deleted files. # if filename renamed # sometimes the R comes with numbers R099, # the R status file usually will look like: # R099 TestsPlaybooks/foo.yml TestPlaybooks/playbook-foo.yml # that is why we set index 2 to file_path - the second index is the updated file name # only modified/added file, text readable, exclude white_list file Returns potential secrets(sensitive data) found in committed and added files :param secrets_file_paths: paths of files that are being commited to git repo :return: dictionary(filename: (list)secrets) of strings sorted by file name for secrets found in files # Get generic white list set # get file contents # Validate if it is integration documentation file # if py/js file, search for yml in order to retrieve temp white list # Add all context output paths keywords to whitelist temporary # Search by lines after strings with high entropy as possibly suspicious # if detected disable-secrets comment, skip the line # REGEX scanning for IOCs and false positive groups # added false positives into white list array before testing the strings in line # due to nature of eml files, skip string by string secret detection - only regex # calculate entropy for each string in the file # compare the lower case of the string against both generic whitelist & temp white list # uniquify identical 
matches between lists Scans for IOCs with potentially low entropy score :param line: line to test as string representation (string) :return potential_secrets (list) IOCs found via regex, false_positives (list) Non secrets with high entropy # Dates REGEX for false positive preventing since they have high entropy # UUID REGEX # docker images version are detected as ips. so we ignore and whitelist them # example: dockerimage: demisto/duoadmin:172.16.17.32 # URL REGEX # EMAIL REGEX # IPV6 REGEX # IPV4 REGEX Algorithm to determine the randomness of a given data. Higher is more random/complex, most English words will yield in average result of 3 :param data: could be either a list/dict or a string. :return: entropy: entropy score. # each unicode code representation of all characters which are considered printable # probability of event X # the information in every possible news, in bits # if pdf file, parse text # Open each file, read its contents in UTF-8 encoding to avoid unicode characters
| 1.985501
| 2
|
weather/views.py
|
Sanjin002/AGRO-kopija
| 0
|
6625625
|
from django.shortcuts import render
import requests
from datetime import datetime
from django.http import JsonResponse
# Create your views here.
def home_weather(request):
    """Return the full list of city objects from the meteo service as JSON."""
    # Base URL of the weather service (comment translated from Croatian:
    # "here we store the meteo-base url").
    url = 'http://meteo.pointjupiter.co/'
    payload = requests.get(url.format()).json()
    # 'cities' holds the complete list of city objects.
    return JsonResponse({'cities': payload['cities']})
def home_weather_prediction(request, city_name, option):
    """Return an ``option``-day forecast for ``city_name`` as JSON.

    :param request: Django HTTP request (unused beyond routing).
    :param city_name: city name as reported by the upstream API.
    :param option: number of forecast days (originally only 1, 3 or 5 were
        handled; now any value is clamped to the 0..5 range).
    :return: JsonResponse mapping ``day_1``..``day_5`` to forecast objects;
        days beyond ``option`` — or every day for an unknown city — stay
        empty strings, matching the original payload shape.
    """
    now = datetime.now()
    curr_date = now.strftime("%Y-%m-%d")
    print(curr_date)  # kept: original code logged the current date
    url = 'http://meteo.pointjupiter.co/'
    # FIX: dropped no-op ``.format()`` and added a request timeout.
    response = requests.get(url, timeout=10).json()
    cities = response['cities']
    # Default payload: all five days empty, same shape as the original.
    weather_json = {'day_%d' % i: '' for i in range(1, 6)}
    for city in cities:
        if city['name'] == city_name:
            # Generalised from the duplicated 1/3/5 branches: fetch the
            # per-city forecast once and fill the first ``option`` days.
            days = min(max(int(option), 0), 5)
            if days:
                responsee = requests.get(city['url'], timeout=10).json()
                for i in range(days):
                    weather_json['day_%d' % (i + 1)] = responsee['data'][i]['forecast']
            break  # FIX: stop scanning once the city is found
    return JsonResponse(weather_json)
|
from django.shortcuts import render
import requests
from datetime import datetime
from django.http import JsonResponse
# Create your views here.
def home_weather(request):
    """Return the list of cities known to the meteo backend as JSON.

    :param request: Django HTTP request (unused beyond routing).
    :return: JsonResponse with a single key ``cities`` holding the raw
        city objects from the upstream API.
    """
    # Base URL of the weather backend (meteobaze).
    url = 'http://meteo.pointjupiter.co/'
    # FIX: dropped the no-op ``url.format()`` call; added a timeout so a
    # stalled upstream cannot hang the worker indefinitely.
    response = requests.get(url, timeout=10).json()
    # Full list of city objects returned by the service.
    cities = response['cities']
    context = {
        'cities': cities
    }
    return JsonResponse(context)
def home_weather_prediction(request, city_name, option):
    """Return an ``option``-day forecast for ``city_name`` as JSON.

    :param request: Django HTTP request (unused beyond routing).
    :param city_name: city name as reported by the upstream API.
    :param option: number of forecast days (originally only 1, 3 or 5 were
        handled; now any value is clamped to the 0..5 range).
    :return: JsonResponse mapping ``day_1``..``day_5`` to forecast objects;
        days beyond ``option`` — or every day for an unknown city — stay
        empty strings, matching the original payload shape.
    """
    now = datetime.now()
    curr_date = now.strftime("%Y-%m-%d")
    print(curr_date)  # kept: original code logged the current date
    url = 'http://meteo.pointjupiter.co/'
    # FIX: dropped no-op ``.format()`` and added a request timeout.
    response = requests.get(url, timeout=10).json()
    cities = response['cities']
    # Default payload: all five days empty, same shape as the original.
    weather_json = {'day_%d' % i: '' for i in range(1, 6)}
    for city in cities:
        if city['name'] == city_name:
            # Generalised from the duplicated 1/3/5 branches: fetch the
            # per-city forecast once and fill the first ``option`` days.
            days = min(max(int(option), 0), 5)
            if days:
                responsee = requests.get(city['url'], timeout=10).json()
                for i in range(days):
                    weather_json['day_%d' % (i + 1)] = responsee['data'][i]['forecast']
            break  # FIX: stop scanning once the city is found
    return JsonResponse(weather_json)
|
bs
| 0.371582
|
# Create your views here. #ovdje spremamo url od meteobaze # ovdje imamo spremljenu sad cijelu listu objekata gradova
| 2.628685
| 3
|
expert.py
|
davtoh/RRTools
| 1
|
6625626
|
from __future__ import print_function
from __future__ import absolute_import
from builtins import input
from past.builtins import basestring
from builtins import object
import os
import cv2
import numpy as np
from RRtoolbox.lib.arrayops import contours2mask, findContours
from RRtoolbox.lib.cache import MemoizedDict
from RRtoolbox.lib.directory import getData, getPath, mkPath, increment_if_exits
from RRtoolbox.lib.image import getcoors, loadFunc, drawcoorarea, Image
from RRtoolbox.lib.root import glob, NameSpace
from RRtoolbox.shell import string_interpreter
from imrestore import check_valid
class ImageExpert(Image):
    """Image wrapper that accumulates expert-annotated coordinate sets.

    ``data`` may be a file name (a fresh annotation dict is created) or a
    previously cached annotation dict containing at least ``fn`` and
    ``shape``.
    """
    def __init__(self, data, shape = None):
        # Accept either a path string or a cached annotation dictionary.
        if isinstance(data,basestring):
            fn = data
            data = {"fn":fn,"shape":shape}
        else:
            fn = data["fn"]
            shape = data["shape"] # loads image with shape of coordinates
        self.data = data
        base,path,name,ext = getData(fn)
        super(ImageExpert,self).__init__(path=path, ext=ext, name=name, shape=shape)
    def coordinates(self, key, msg=None, times=None, review= False):
        """Interactively collect (or review) coordinate sets stored under ``key``.

        :param key: annotation field name (e.g. ``coors_retina``).
        :param msg: prompt format string; may use ``{i}``, ``{limit}``,
            ``{key}``, ``{name}``, ``{path}``, ``{fn}``, ``{shape}`` and
            ``{remain}`` placeholders.
        :param times: how many coordinate sets to request; ``True`` means
            keep asking until the user returns an empty selection.
        :param review: when True, previously stored coordinates are shown
            again for correction before any new ones are requested.
        :return: list of coordinate sets (also stored in ``self.data[key]``).
        """
        self.data["shape"] = self.BGRA.shape[:2] # update loaded shape
        if msg is None:
            msg = "{i}/{limit} Select region for {key}:"
        if times is None:
            times = 1
        # NOTE(review): this assert accepts any truthy non-int because of the
        # ``or`` chain — presumably meant "int AND positive, or True"; confirm.
        assert isinstance(times,int) or times>0 or times == True
        # init default dictionary to format
        default = dict(key=key,
                       name=self.name,
                       path=self.path,
                       fn=self.data["fn"],
                       shape=self.data["shape"])
        # get old coordinates
        old = self.data.get(key)
        # init new coordinates
        coor_list = []
        if review and old is not None:
            # review mode: show each cached selection so the user can fix it
            default["limit"] = len(old)
            for coors in old:
                formatted = msg.format(i=len(coor_list)+1,**default)
                coors = getcoors(self.BGRA,formatted,drawcoorarea,coors=coors)
                coor_list.append(coors)
                if not isinstance(times,bool):
                    times -= 1
        elif old is not None:
            # cached data and no review requested: keep it as-is
            coor_list = old
        if review or old is None:
            # ask for (remaining) new selections; empty selection stops early
            default["limit"] = times
            while times:
                default["remain"] = times
                formatted = msg.format(i=len(coor_list)+1,**default)
                coors = getcoors(self.BGRA,formatted,drawcoorarea)
                if not coors:
                    break
                coor_list.append(coors)
                if not isinstance(times,bool):
                    times -= 1
        #if not coor_list: # ensures there is coors inisite coor_list
        #    coor_list = [[]]
        self.data[key] = coor_list
        return coor_list
class Expert(object):
    """
    Class to generate images expert data (experimental).

    Walks over a set of images, lets the user annotate regions of interest
    (retina, optic disc, defects, noisy areas) and caches the resulting
    coordinate data, optionally persisting it next to each image.
    """
    def __init__(self, path, data = None, shape = None, modify=False, reset=False,
                 review = False, inpath=None, ask=False, contents="*.*",
                 filter=check_valid):
        """
        :param path: folder pattern (string) or iterable of image paths
        :param data: expert-data cache: None -> new dict, string -> persistent
            MemoizedDict created at that path, mapping -> used as-is
        :param shape: shape used when loading each image (see ImageExpert)
        :param modify: when True, :meth:`new_modify` is applied to each entry
        :param reset: when True, cached expert data is ignored and re-created
        :param review: when True, cached coordinates are shown for correction
        :param inpath: folder name to also persist expert data beside each image
        :param ask: when True, confirm each image with the user before processing
        :param contents: glob pattern used to look for images inside ``path``
        :param filter: function to filter out files
        """
        if isinstance(path, basestring):
            path = os.path.abspath(path)  # ensures absolute path
            fns = glob(path, contents=contents, check=filter)
        else:
            # FIX: loop variable renamed from ``set`` (shadowed the builtin)
            fns = [os.path.abspath(p) for p in path]
        if not fns:
            raise Exception("not images to get expert data")
        if data is None:
            data = {}  # volatile, in-memory cache
        elif isinstance(data, basestring):
            data = MemoizedDict(data)  # persistent on-disk cache
        # otherwise ``data`` is assumed to already be a mapping
        self.data = data
        self.ask = ask
        self.fns = fns
        self.reset = reset
        self.review = review
        self.inpath = inpath
        self.modify = modify
        self.shape = shape
    def start(self):
        """Run the annotation session over every configured image."""
        data = self.data
        for i, fn in enumerate(self.fns):
            inpath = self.inpath
            # BUGFIX: the original negated this test, so answering "yes"
            # skipped the image and "no" processed it. Skip only on "no".
            if self.ask and input("check {}?(yes/no)".format(fn)).lower() in ("not", "no", "n"):
                continue
            print("{}/{} checking {}".format(i + 1, len(self.fns), fn))
            key = Expert.get_key(fn)
            exp = None
            memo_inpath = None
            if inpath is not None:
                # also look for (and later store) expert data next to the image
                p = os.path.join(os.path.split(fn)[0], inpath)
                memo_inpath = MemoizedDict(p)
                if key in memo_inpath:
                    exp = ImageExpert(memo_inpath[key], shape=self.shape)
            if exp is None and key in data and not self.reset:
                exp = ImageExpert(data[key], shape=self.shape)  # reuse cached entry
            elif exp is None:
                exp = ImageExpert(fn, shape=self.shape)  # start from scratch
            # collect (or review) each annotation field
            exp.coordinates(msg="Select retinal area for {}".format(key),
                            key="coors_retina", review=self.review)
            exp.coordinates(msg="Select optic disc for {}".format(key),
                            key="coors_optic_disc", review=self.review)
            exp.coordinates(msg="Select defects (inside retina)-{i}/{limit}:",
                            key="coors_defects", review=self.review, times=True)
            exp.coordinates(msg="Select noisy areas -{i}/{limit}:",
                            key="coors_noisy", review=self.review, times=True)
            if self.modify:
                self.new_modify(exp.data)
            # register expert data; dict() ensures MemoizedDicts become plain dicts
            exp_data = dict(exp.data)
            data[key] = exp_data
            if inpath is not None and memo_inpath is not None:
                memo_inpath[key] = exp_data
    @classmethod
    def get_key(cls, fn):
        """Cache key for a file: its base name plus extension."""
        # FIX: first parameter renamed ``self`` -> ``cls`` (it is a classmethod)
        return "".join(getData(fn)[-2:])
    def new_modify(self, data):
        """Hook to post-process a cached entry when ``modify`` is set.

        Parameter renamed from ``dict`` (shadowed the builtin). Override in
        subclasses; the default implementation does nothing.
        """
        pass
def shell(args=None, namespace=None):
    """
    Shell to run in terminal the expert data generator program
    :param args: (None) list of arguments. If None it captures the
        arguments in sys.
    :param namespace: (None) namespace to place variables. If None
        it creates a namespace.
    :return: namespace
    """
    if namespace is None:
        namespace = NameSpace()
    import argparse
    # Build the CLI; RawDescriptionHelpFormatter keeps the epilog line breaks.
    parser = argparse.ArgumentParser(formatter_class = argparse.RawDescriptionHelpFormatter,
                                     description = "Create expert data for images",
                                     epilog="\nContributions and bug reports are appreciated."
                                            "\nauthor: <NAME>"
                                            "\ne-mail: <EMAIL>"
                                            "\nproject: https://github.com/davtoh/RRtools")
    parser.add_argument('path', nargs=1,
                        help='')
    parser.add_argument('-o', '--output', default="{path}/_expert", nargs='?', action="store",
                        const="{path}/_expert", type = string_interpreter(),
                        help='Customize output folder for expert data')
    parser.add_argument('-s', '--subfolders', action='store_true',
                        help='Look for images in sub folders')
    parser.add_argument('-f', '--from', type = str,
                        help='Start from a given pattern')
    parser.add_argument('-i', '--inpath', type = str,
                        help='Save folder of expert data also in the path of the image')
    parser.add_argument('-m', '--modify', action='store_true',
                        help='')
    parser.add_argument('-r', '--reset', action='store_true',
                        help='')
    parser.add_argument('-v', '--review', action='store_true',
                        help='')
    parser.add_argument('-a', '--ask', action='store_true',
                        help='')
    parser.add_argument('-c', '--contents', type = str, default="*.*",
                        help='pattern to look in folder')
    # parse sys and get argument variables
    args = vars(parser.parse_args(args=args, namespace=namespace))
    # Pop the options consumed here; the remainder is forwarded to Expert(**args).
    setspath = args.pop("path")[0]
    bfrom = args.pop("from")
    expertname = args.pop("output").format(path = setspath)
    subfolders = args.pop("subfolders")
    if subfolders:
        def check_dir(path):
            # keep only directories, excluding the expert-data output folder
            return os.path.isdir(path) and not path.endswith(expertname)
        imsets = glob(os.path.join(setspath,"") + "*", check=check_dir) # only folders
    else:
        imsets = [setspath]
    start = False
    # Skip image sets until ``bfrom`` matches, then process the rest in order.
    for imset in imsets:
        if bfrom is None or bfrom in imset:
            start = True
        if start:
            exp = Expert(imset,data=expertname,**args)
            exp.start()
def crop_expert(fn, outpath = None, expertpath=None, loader=None, preview=None,
                form = None, startfrom = None, name = None, modify=False, reset=False,
                review = False, ask=False, help = False, ext = None):
    """
    Crop input images and save ROIs together with derived expert data.

    :param fn: file name or glob pattern of the input image(s)
    :param outpath: (None) folder where ROIs are saved; defaults to a folder
        named after each image
    :param expertpath: (None) folder for expert data; defaults to
        ``<outpath>/_expert``
    :param loader: (loadFunc(1)) image loader
    :param preview: (True) preview flag forwarded to :func:`getROI`
    :param form: crop shape type supported by :func:`getROI` (default "rect")
    :param startfrom: start from a specific pattern in path
    :param name: default name to give to cropped images
    :param modify: forwarded to :class:`Expert`
    :param reset: forwarded to :class:`Expert`
    :param review: forwarded to :class:`Expert`
    :param ask: forwarded to :class:`Expert`
    :param help: help the user by providing some initial coordinates (experimental)
    :param ext: optional extension override for saved images
    :return: None (results are written to disk)
    """
    from RRtoolFC.GUI.forms import getROI
    from RRtoolbox.tools.segmentation import retinal_mask
    imsets = glob(fn)
    if preview is None:
        preview = True
    if form is None:
        form = "rect"
    if loader is None:
        loader = loadFunc(1)
    start = False
    for impath in imsets:
        # skip files until ``startfrom`` matches, then process the rest
        if startfrom is None or startfrom in impath:
            start = True
        if start:
            image = loader(impath).astype(np.float32)
            print("loaded",impath)
            a,b,c,d = getData(impath)
            if name is not None: # change names
                c = name
            if ext is not None: # change extension
                if ext.startswith("."):
                    d = ext
                else:
                    d = "."+ext
            # get path to save ROIs
            if outpath is None: # create default path
                outpath2 = a+b+c # add new folder with name of the image
            else: # custom string path
                outpath2 = outpath
            if expertpath is None: # create default path
                expertpath2 = os.path.join(outpath2,"_expert")
            else: # custom string path
                expertpath2 = expertpath
            # make path if it does not exist
            mkPath(getPath(outpath2))
            # save a copy of the original image next to the ROIs
            fn = os.path.join(outpath2,"_original_"+c+d)
            if cv2.imwrite(fn, image):
                print("Original image saved as {}".format(fn))
            else:
                # BUGFIX: this message was a bare string expression (a no-op);
                # the failure is now actually reported.
                print("Original image {} could not be saved".format(fn))
            fn = impath
            # get expert data for the parent image
            exp = Expert(fn,data=expertpath2, shape=image.shape, modify=modify, reset=reset,
                         review = review, ask=ask, filter=os.path.isfile)
            contours, _ = findContours(retinal_mask(image.copy()),
                                       cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            main_key = getData(fn)[-2]+d
            if help and main_key not in exp.data: # help the user by guessing initial threshold
                exp.data[main_key] = {"fn":fn,"shape":image.shape[:2],"coors_retina":contours}
            exp.start()
            expertfield = list(exp.data.values())[0]
            # interactively crop ROIs until the user declines
            while not input("get ROI?(y,n)").lower() in ("n","not","no"):
                roi, crop = getROI(image, preview=preview, form= form, crop=False)
                fn = increment_if_exits(os.path.join(outpath2,"{}{}".format(c,d)),force=True)
                imroi = roi.getArrayRegion(image, crop)
                # save ROI
                if cv2.imwrite(fn, imroi):
                    print("Saved: {}".format(fn))
                else:
                    # BUGFIX: same no-op string expression as above.
                    print("{} could not be saved".format(fn))
                info = {}
                # automatically derive expert data for the crop from the parent image
                for field,val in list(expertfield.items()):
                    if field.startswith("coors_"):
                        mask = contours2mask(val,shape=image.shape)
                        mask = roi.getArrayRegion(mask, crop)
                        contours, _ = findContours(mask.astype(np.uint8),
                                                   cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                        info[field] = contours
                    elif "fn" == field:
                        info[field] = fn
                    elif "shape" == field:
                        info[field] = imroi.shape[:2]
                    else:
                        raise Exception("Expert structure not supported")
                exp.data["".join(getData(fn)[-2:])] = info # register expert data of crop
# NOTE: the next two blocks are example/debug code deliberately disabled
# with ``if False`` — kept for reference, never executed.
if False and __name__ == "__main__": # for a folder with many sets
    """
    Example using Expert class: generate expert data contained in results/ folder
    """
    def check_dir(path):
        # keep only directories, excluding the expert-data output folder
        return os.path.isdir(path) and not path.endswith(expertname)
    setspath = "./results/"
    expertname = "_expert"
    imsets = glob(os.path.join(setspath,"") + "*", check=check_dir) # only folders
    bfrom = None
    start = False
    for imset in imsets:
        if bfrom is None or bfrom in imset:
            start = True
        if start:
            exp = Expert(imset,data=os.path.join(setspath,expertname),inpath=expertname,review=False)
            exp.start()
if False and __name__ == "__main__": # for a folder with many sets
    """
    Example using crop_expert function: generate expert data from X image and save
    perspectives in another path with expert data along.
    """
    setpath = "/mnt/4E443F99443F82AF/MEGAsync/TESIS/DATA_RAW/DATABASE/Database_DRIVE/test/images/"
    outpath = "./results/"
    base_name = "test{}"
    startfrom = "01_test"
    loader=None
    preview=True
    form = "rect"
    debug = True
    for i,fn in enumerate(glob(setpath)):
        name = base_name.format(i+1)
        base_out = os.path.join(outpath,name)
        crop_expert(fn=fn, outpath=base_out, startfrom=startfrom, help=True,
                    loader=loader, preview=preview, form= form, name=name, ext="png")
        # debug automatic cropping with expert data
        if debug:
            print("################ DEBUG ################")
            exp = Expert(base_out,data=os.path.join(base_out,"_expert"),review=True)
            exp.start()
            print("############## END DEBUG ##############")
# Actual entry point: run the expert shell on ./results/ including subfolders.
if __name__ == "__main__":
    """
    Call expert program from terminal
    """
    shell("./results/ --subfolders".split()) # call expert shell
    #shell() # call expert shell
|
from __future__ import print_function
from __future__ import absolute_import
from builtins import input
from past.builtins import basestring
from builtins import object
import os
import cv2
import numpy as np
from RRtoolbox.lib.arrayops import contours2mask, findContours
from RRtoolbox.lib.cache import MemoizedDict
from RRtoolbox.lib.directory import getData, getPath, mkPath, increment_if_exits
from RRtoolbox.lib.image import getcoors, loadFunc, drawcoorarea, Image
from RRtoolbox.lib.root import glob, NameSpace
from RRtoolbox.shell import string_interpreter
from imrestore import check_valid
class ImageExpert(Image):
    """Image wrapper that accumulates expert-annotated coordinate sets.

    ``data`` may be a file name (a fresh annotation dict is created) or a
    previously cached annotation dict containing at least ``fn`` and
    ``shape``.
    """
    def __init__(self, data, shape = None):
        # Accept either a path string or a cached annotation dictionary.
        if isinstance(data,basestring):
            fn = data
            data = {"fn":fn,"shape":shape}
        else:
            fn = data["fn"]
            shape = data["shape"] # loads image with shape of coordinates
        self.data = data
        base,path,name,ext = getData(fn)
        super(ImageExpert,self).__init__(path=path, ext=ext, name=name, shape=shape)
    def coordinates(self, key, msg=None, times=None, review= False):
        """Interactively collect (or review) coordinate sets stored under ``key``.

        :param key: annotation field name (e.g. ``coors_retina``).
        :param msg: prompt format string; may use ``{i}``, ``{limit}``,
            ``{key}``, ``{name}``, ``{path}``, ``{fn}``, ``{shape}`` and
            ``{remain}`` placeholders.
        :param times: how many coordinate sets to request; ``True`` means
            keep asking until the user returns an empty selection.
        :param review: when True, previously stored coordinates are shown
            again for correction before any new ones are requested.
        :return: list of coordinate sets (also stored in ``self.data[key]``).
        """
        self.data["shape"] = self.BGRA.shape[:2] # update loaded shape
        if msg is None:
            msg = "{i}/{limit} Select region for {key}:"
        if times is None:
            times = 1
        # NOTE(review): this assert accepts any truthy non-int because of the
        # ``or`` chain — presumably meant "int AND positive, or True"; confirm.
        assert isinstance(times,int) or times>0 or times == True
        # init default dictionary to format
        default = dict(key=key,
                       name=self.name,
                       path=self.path,
                       fn=self.data["fn"],
                       shape=self.data["shape"])
        # get old coordinates
        old = self.data.get(key)
        # init new coordinates
        coor_list = []
        if review and old is not None:
            # review mode: show each cached selection so the user can fix it
            default["limit"] = len(old)
            for coors in old:
                formatted = msg.format(i=len(coor_list)+1,**default)
                coors = getcoors(self.BGRA,formatted,drawcoorarea,coors=coors)
                coor_list.append(coors)
                if not isinstance(times,bool):
                    times -= 1
        elif old is not None:
            # cached data and no review requested: keep it as-is
            coor_list = old
        if review or old is None:
            # ask for (remaining) new selections; empty selection stops early
            default["limit"] = times
            while times:
                default["remain"] = times
                formatted = msg.format(i=len(coor_list)+1,**default)
                coors = getcoors(self.BGRA,formatted,drawcoorarea)
                if not coors:
                    break
                coor_list.append(coors)
                if not isinstance(times,bool):
                    times -= 1
        #if not coor_list: # ensures there is coors inisite coor_list
        #    coor_list = [[]]
        self.data[key] = coor_list
        return coor_list
class Expert(object):
    """
    Class to generate images expert data (experimental).

    Walks over a set of images, lets the user annotate regions of interest
    (retina, optic disc, defects, noisy areas) and caches the resulting
    coordinate data, optionally persisting it next to each image.
    """
    def __init__(self, path, data = None, shape = None, modify=False, reset=False,
                 review = False, inpath=None, ask=False, contents="*.*",
                 filter=check_valid):
        """
        :param path: folder pattern (string) or iterable of image paths
        :param data: expert-data cache: None -> new dict, string -> persistent
            MemoizedDict created at that path, mapping -> used as-is
        :param shape: shape used when loading each image (see ImageExpert)
        :param modify: when True, :meth:`new_modify` is applied to each entry
        :param reset: when True, cached expert data is ignored and re-created
        :param review: when True, cached coordinates are shown for correction
        :param inpath: folder name to also persist expert data beside each image
        :param ask: when True, confirm each image with the user before processing
        :param contents: glob pattern used to look for images inside ``path``
        :param filter: function to filter out files
        """
        if isinstance(path, basestring):
            path = os.path.abspath(path)  # ensures absolute path
            fns = glob(path, contents=contents, check=filter)
        else:
            # FIX: loop variable renamed from ``set`` (shadowed the builtin)
            fns = [os.path.abspath(p) for p in path]
        if not fns:
            raise Exception("not images to get expert data")
        if data is None:
            data = {}  # volatile, in-memory cache
        elif isinstance(data, basestring):
            data = MemoizedDict(data)  # persistent on-disk cache
        # otherwise ``data`` is assumed to already be a mapping
        self.data = data
        self.ask = ask
        self.fns = fns
        self.reset = reset
        self.review = review
        self.inpath = inpath
        self.modify = modify
        self.shape = shape
    def start(self):
        """Run the annotation session over every configured image."""
        data = self.data
        for i, fn in enumerate(self.fns):
            inpath = self.inpath
            # BUGFIX: the original negated this test, so answering "yes"
            # skipped the image and "no" processed it. Skip only on "no".
            if self.ask and input("check {}?(yes/no)".format(fn)).lower() in ("not", "no", "n"):
                continue
            print("{}/{} checking {}".format(i + 1, len(self.fns), fn))
            key = Expert.get_key(fn)
            exp = None
            memo_inpath = None
            if inpath is not None:
                # also look for (and later store) expert data next to the image
                p = os.path.join(os.path.split(fn)[0], inpath)
                memo_inpath = MemoizedDict(p)
                if key in memo_inpath:
                    exp = ImageExpert(memo_inpath[key], shape=self.shape)
            if exp is None and key in data and not self.reset:
                exp = ImageExpert(data[key], shape=self.shape)  # reuse cached entry
            elif exp is None:
                exp = ImageExpert(fn, shape=self.shape)  # start from scratch
            # collect (or review) each annotation field
            exp.coordinates(msg="Select retinal area for {}".format(key),
                            key="coors_retina", review=self.review)
            exp.coordinates(msg="Select optic disc for {}".format(key),
                            key="coors_optic_disc", review=self.review)
            exp.coordinates(msg="Select defects (inside retina)-{i}/{limit}:",
                            key="coors_defects", review=self.review, times=True)
            exp.coordinates(msg="Select noisy areas -{i}/{limit}:",
                            key="coors_noisy", review=self.review, times=True)
            if self.modify:
                self.new_modify(exp.data)
            # register expert data; dict() ensures MemoizedDicts become plain dicts
            exp_data = dict(exp.data)
            data[key] = exp_data
            if inpath is not None and memo_inpath is not None:
                memo_inpath[key] = exp_data
    @classmethod
    def get_key(cls, fn):
        """Cache key for a file: its base name plus extension."""
        # FIX: first parameter renamed ``self`` -> ``cls`` (it is a classmethod)
        return "".join(getData(fn)[-2:])
    def new_modify(self, data):
        """Hook to post-process a cached entry when ``modify`` is set.

        Parameter renamed from ``dict`` (shadowed the builtin). Override in
        subclasses; the default implementation does nothing.
        """
        pass
def shell(args=None, namespace=None):
    """
    Shell to run in terminal the expert data generator program
    :param args: (None) list of arguments. If None it captures the
        arguments in sys.
    :param namespace: (None) namespace to place variables. If None
        it creates a namespace.
    :return: namespace
    """
    if namespace is None:
        namespace = NameSpace()
    import argparse
    # Build the CLI; RawDescriptionHelpFormatter keeps the epilog line breaks.
    parser = argparse.ArgumentParser(formatter_class = argparse.RawDescriptionHelpFormatter,
                                     description = "Create expert data for images",
                                     epilog="\nContributions and bug reports are appreciated."
                                            "\nauthor: <NAME>"
                                            "\ne-mail: <EMAIL>"
                                            "\nproject: https://github.com/davtoh/RRtools")
    parser.add_argument('path', nargs=1,
                        help='')
    parser.add_argument('-o', '--output', default="{path}/_expert", nargs='?', action="store",
                        const="{path}/_expert", type = string_interpreter(),
                        help='Customize output folder for expert data')
    parser.add_argument('-s', '--subfolders', action='store_true',
                        help='Look for images in sub folders')
    parser.add_argument('-f', '--from', type = str,
                        help='Start from a given pattern')
    parser.add_argument('-i', '--inpath', type = str,
                        help='Save folder of expert data also in the path of the image')
    parser.add_argument('-m', '--modify', action='store_true',
                        help='')
    parser.add_argument('-r', '--reset', action='store_true',
                        help='')
    parser.add_argument('-v', '--review', action='store_true',
                        help='')
    parser.add_argument('-a', '--ask', action='store_true',
                        help='')
    parser.add_argument('-c', '--contents', type = str, default="*.*",
                        help='pattern to look in folder')
    # parse sys and get argument variables
    args = vars(parser.parse_args(args=args, namespace=namespace))
    # Pop the options consumed here; the remainder is forwarded to Expert(**args).
    setspath = args.pop("path")[0]
    bfrom = args.pop("from")
    expertname = args.pop("output").format(path = setspath)
    subfolders = args.pop("subfolders")
    if subfolders:
        def check_dir(path):
            # keep only directories, excluding the expert-data output folder
            return os.path.isdir(path) and not path.endswith(expertname)
        imsets = glob(os.path.join(setspath,"") + "*", check=check_dir) # only folders
    else:
        imsets = [setspath]
    start = False
    # Skip image sets until ``bfrom`` matches, then process the rest in order.
    for imset in imsets:
        if bfrom is None or bfrom in imset:
            start = True
        if start:
            exp = Expert(imset,data=expertname,**args)
            exp.start()
def crop_expert(fn, outpath = None, expertpath=None, loader=None, preview=None,
                form = None, startfrom = None, name = None, modify=False, reset=False,
                review = False, ask=False, help = False, ext = None):
    """
    Crop input images and save ROIs together with derived expert data.

    :param fn: file name or glob pattern of the input image(s)
    :param outpath: (None) folder where ROIs are saved; defaults to a folder
        named after each image
    :param expertpath: (None) folder for expert data; defaults to
        ``<outpath>/_expert``
    :param loader: (loadFunc(1)) image loader
    :param preview: (True) preview flag forwarded to :func:`getROI`
    :param form: crop shape type supported by :func:`getROI` (default "rect")
    :param startfrom: start from a specific pattern in path
    :param name: default name to give to cropped images
    :param modify: forwarded to :class:`Expert`
    :param reset: forwarded to :class:`Expert`
    :param review: forwarded to :class:`Expert`
    :param ask: forwarded to :class:`Expert`
    :param help: help the user by providing some initial coordinates (experimental)
    :param ext: optional extension override for saved images
    :return: None (results are written to disk)
    """
    from RRtoolFC.GUI.forms import getROI
    from RRtoolbox.tools.segmentation import retinal_mask
    imsets = glob(fn)
    if preview is None:
        preview = True
    if form is None:
        form = "rect"
    if loader is None:
        loader = loadFunc(1)
    start = False
    for impath in imsets:
        # skip files until ``startfrom`` matches, then process the rest
        if startfrom is None or startfrom in impath:
            start = True
        if start:
            image = loader(impath).astype(np.float32)
            print("loaded",impath)
            a,b,c,d = getData(impath)
            if name is not None: # change names
                c = name
            if ext is not None: # change extension
                if ext.startswith("."):
                    d = ext
                else:
                    d = "."+ext
            # get path to save ROIs
            if outpath is None: # create default path
                outpath2 = a+b+c # add new folder with name of the image
            else: # custom string path
                outpath2 = outpath
            if expertpath is None: # create default path
                expertpath2 = os.path.join(outpath2,"_expert")
            else: # custom string path
                expertpath2 = expertpath
            # make path if it does not exist
            mkPath(getPath(outpath2))
            # save a copy of the original image next to the ROIs
            fn = os.path.join(outpath2,"_original_"+c+d)
            if cv2.imwrite(fn, image):
                print("Original image saved as {}".format(fn))
            else:
                # BUGFIX: this message was a bare string expression (a no-op);
                # the failure is now actually reported.
                print("Original image {} could not be saved".format(fn))
            fn = impath
            # get expert data for the parent image
            exp = Expert(fn,data=expertpath2, shape=image.shape, modify=modify, reset=reset,
                         review = review, ask=ask, filter=os.path.isfile)
            contours, _ = findContours(retinal_mask(image.copy()),
                                       cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            main_key = getData(fn)[-2]+d
            if help and main_key not in exp.data: # help the user by guessing initial threshold
                exp.data[main_key] = {"fn":fn,"shape":image.shape[:2],"coors_retina":contours}
            exp.start()
            expertfield = list(exp.data.values())[0]
            # interactively crop ROIs until the user declines
            while not input("get ROI?(y,n)").lower() in ("n","not","no"):
                roi, crop = getROI(image, preview=preview, form= form, crop=False)
                fn = increment_if_exits(os.path.join(outpath2,"{}{}".format(c,d)),force=True)
                imroi = roi.getArrayRegion(image, crop)
                # save ROI
                if cv2.imwrite(fn, imroi):
                    print("Saved: {}".format(fn))
                else:
                    # BUGFIX: same no-op string expression as above.
                    print("{} could not be saved".format(fn))
                info = {}
                # automatically derive expert data for the crop from the parent image
                for field,val in list(expertfield.items()):
                    if field.startswith("coors_"):
                        mask = contours2mask(val,shape=image.shape)
                        mask = roi.getArrayRegion(mask, crop)
                        contours, _ = findContours(mask.astype(np.uint8),
                                                   cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
                        info[field] = contours
                    elif "fn" == field:
                        info[field] = fn
                    elif "shape" == field:
                        info[field] = imroi.shape[:2]
                    else:
                        raise Exception("Expert structure not supported")
                exp.data["".join(getData(fn)[-2:])] = info # register expert data of crop
# NOTE: the next two blocks are example/debug code deliberately disabled
# with ``if False`` — kept for reference, never executed.
if False and __name__ == "__main__": # for a folder with many sets
    """
    Example using Expert class: generate expert data contained in results/ folder
    """
    def check_dir(path):
        # keep only directories, excluding the expert-data output folder
        return os.path.isdir(path) and not path.endswith(expertname)
    setspath = "./results/"
    expertname = "_expert"
    imsets = glob(os.path.join(setspath,"") + "*", check=check_dir) # only folders
    bfrom = None
    start = False
    for imset in imsets:
        if bfrom is None or bfrom in imset:
            start = True
        if start:
            exp = Expert(imset,data=os.path.join(setspath,expertname),inpath=expertname,review=False)
            exp.start()
if False and __name__ == "__main__": # for a folder with many sets
    """
    Example using crop_expert function: generate expert data from X image and save
    perspectives in another path with expert data along.
    """
    setpath = "/mnt/4E443F99443F82AF/MEGAsync/TESIS/DATA_RAW/DATABASE/Database_DRIVE/test/images/"
    outpath = "./results/"
    base_name = "test{}"
    startfrom = "01_test"
    loader=None
    preview=True
    form = "rect"
    debug = True
    for i,fn in enumerate(glob(setpath)):
        name = base_name.format(i+1)
        base_out = os.path.join(outpath,name)
        crop_expert(fn=fn, outpath=base_out, startfrom=startfrom, help=True,
                    loader=loader, preview=preview, form= form, name=name, ext="png")
        # debug automatic cropping with expert data
        if debug:
            print("################ DEBUG ################")
            exp = Expert(base_out,data=os.path.join(base_out,"_expert"),review=True)
            exp.start()
            print("############## END DEBUG ##############")
# Actual entry point: run the expert shell on ./results/ including subfolders.
if __name__ == "__main__":
    """
    Call expert program from terminal
    """
    shell("./results/ --subfolders".split()) # call expert shell
    #shell() # call expert shell
|
en
| 0.670855
|
# loads image with shape of coordinates # update loaded shape # init default dictionary to format # get old coordinates # init new coordinates #if not coor_list: # ensures there is coors inisite coor_list # coor_list = [[]] Class to generate images expert data (experimental) :param path: :param data: :param modify: :param reset: :param review: :param inpath: :param ask: :param contents: :param filter: function to filter out files # ensures absolute path # mode 0 lest the user run all un-cached tests, # mode 1 lets the user run all tests and correct cached tests. # mode 2 lets is used to change the fields in the cached data # it should be dictionary # cache new data or replace previous test # register expert data # ensures that memoizedDicts are converted to dict Shell to run in terminal the expert data generator program :param args: (None) list of arguments. If None it captures the arguments in sys. :param namespace: (None) namespace to place variables. If None it creates a namespace. :return: namespace # parse sys and get argument variables # only folders Crop input image and save ROIs :param fn: file name :param outpath: (None) :param expertpath: :param loader: (loadFunc(1)) :param preview: (rect) :param form: crop shape type supported by :func:`getROI` :param startfrom: start from an specific pattern in path :param name: default name to give to cropped images which includes (ImageItem,save_path) items :param modify: :param reset: :param review: :param ask: :param help: help the user by providinf some coordinates (experimental) :return: ROI object, list of transformations #from RRtoolbox.lib.arrayops import foreground # only folders # change names # change extension # get path to save ROIs # create default path # add new folder with name of the image # custom sting path # create default path # custom sting path # make path if it does not exists # save ROI # get expert data # help the user by guessing initial threshold # get ROIs # get ROI # save ROI # 
automatically calculate expert data from parent image # register expert data of crop # for a folder with many sets Example using Expert class: generate expert data contained in results/ folder # only folders # for a folder with many sets Example using crop_expert function: generate expert data from X image and save perspectives in another path with expert data along. # debug automatic cropping with expert data ############### DEBUG ################") ############# END DEBUG ##############") Call expert program from terminal # call expert shell #shell() # call expert shell
| 2.21988
| 2
|
train.py
|
zouguojian/pollutant-prediction
| 0
|
6625627
|
# -- coding: utf-8 --
from __future__ import division
from __future__ import print_function
from spatial_temporal_model.hyparameter import parameter
from spatial_temporal_model.encoder import cnn_lstm
from model.decoder import Dcoderlstm
from model.utils import construct_feed_dict
from model.encoder import Encoderlstm
import pandas as pd
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import model.normalization as normalization
import spatial_temporal_model.process as data_load
import os
import argparse
tf.reset_default_graph()
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
logs_path="board"
def embedding(inputs,
              vocab_size,
              num_units,
              zero_pad=False,
              scale=True,
              scope="embedding",
              reuse=None):
    '''Embeds a given tensor.
    Args:
      inputs: A `Tensor` with type `int32` or `int64` containing the ids
         to be looked up in `lookup table`.
      vocab_size: An int. Vocabulary size.
      num_units: An int. Number of embedding hidden units.
      zero_pad: A boolean. If True, all the values of the fist row (id 0)
        should be constant zeros.
      scale: A boolean. If True. the outputs is multiplied by sqrt num_units.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.
    Returns:
      A `Tensor` with one more rank than inputs's. The last dimensionality
        should be `num_units`.
    For example,
    ```
    import tensorflow as tf
    inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3)))
    outputs = embedding(inputs, 6, 2, zero_pad=True)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print sess.run(outputs)
    >>
    [[[ 0.          0.        ]
      [ 0.09754146  0.67385566]
      [ 0.37864095 -0.35689294]]
     [[-1.01329422 -1.09939694]
      [ 0.7521342   0.38203377]
      [-0.04973143 -0.06210355]]]
    ```
    ```
    import tensorflow as tf
    inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3)))
    outputs = embedding(inputs, 6, 2, zero_pad=False)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print sess.run(outputs)
    >>
    [[[-0.19172323 -0.39159766]
      [-0.43212751 -0.66207761]
      [ 1.03452027 -0.26704335]]
     [[-0.11634696 -0.35983452]
      [ 0.50208133  0.53509563]
      [ 1.22204471 -0.96587461]]]
    ```
    '''
    with tf.variable_scope(scope, reuse=reuse):
        # Learnable embedding table: one row of size ``num_units`` per id.
        # Fixed seed makes initialization reproducible across runs.
        lookup_table = tf.get_variable('lookup_table',
                                       dtype=tf.float32,
                                       shape=[vocab_size, num_units],
                                       initializer=tf.truncated_normal_initializer(mean=0, stddev=1, seed=0))
        if zero_pad:
            # Force row 0 to zeros so a padding id contributes nothing.
            lookup_table = tf.concat((tf.zeros(shape=[1, num_units]),
                                      lookup_table[1:, :]), 0)
        outputs = tf.nn.embedding_lookup(lookup_table, inputs)
        if scale:
            # Scale by sqrt(num_units), as in the Transformer paper.
            outputs = outputs * (num_units ** 0.5)
    return outputs
class Model(object):
def __init__(self,para):
self.para=para
self.pollutant_id={'AQI':0, 'PM2.5':1,'PM10':3, 'SO2':5, 'NO2':7, 'O3':9, 'CO':13}
# define placeholders
self.placeholders = {
# None : batch _size * time _size
'month': tf.placeholder(tf.int32, shape=(None, self.para.input_length+self.para.output_length), name='input_month'),
'day': tf.placeholder(tf.int32, shape=(None, self.para.input_length+self.para.output_length), name='input_day'),
'hour': tf.placeholder(tf.int32, shape=(None, self.para.input_length+self.para.output_length), name='input_hour'),
'features1': tf.placeholder(tf.float32, shape=[None, self.para.input_length, self.para.site_num, self.para.features1],name='input_1'),
'features2': tf.placeholder(tf.float32, shape=[None, self.para.input_length, self.para.features2], name='input_2'),
'labels': tf.placeholder(tf.float32, shape=[None, self.para.output_length]),
'dropout': tf.placeholder_with_default(0., shape=()),
'is_training': tf.placeholder(tf.bool, shape=(),name='input_is_training'),
}
self.model()
def model(self):
'''
:param batch_size: 64
:param encoder_layer:
:param decoder_layer:
:param encoder_nodes:
:param prediction_size:
:param is_training: True
:return:
'''
with tf.variable_scope('month'):
self.m_emb = embedding(self.placeholders['month'], vocab_size=13, num_units=self.para.hidden_size,
scale=False, scope="month_embed")
print('d_emd shape is : ', self.m_emb.shape)
with tf.variable_scope('day'):
self.d_emb = embedding(self.placeholders['day'], vocab_size=32, num_units=self.para.hidden_size,
scale=False, scope="day_embed")
print('d_emd shape is : ', self.d_emb.shape)
with tf.variable_scope('hour'):
self.h_emb = embedding(self.placeholders['hour'], vocab_size=24, num_units=self.para.hidden_size,
scale=False, scope="hour_embed")
print('h_emd shape is : ', self.h_emb.shape)
# create model
# this step use to encoding the input series data
'''
rlstm, return --- for example ,output shape is :(32, 3, 128)
axis=0: bath size
axis=1: input data time size
axis=2: output feature size
'''
# shape is [batch, input length, embedding size]
emb=tf.add_n([self.m_emb,self.d_emb,self.h_emb])
# cnn时空特征提取
l = cnn_lstm(batch_size=self.para.batch_size,
layer_num=self.para.hidden_layer,
nodes=self.para.hidden_size,
highth=self.para.h,
width=self.para.w,
placeholders=self.placeholders)
# [batch, time ,hidden size]
(h_states1, c_states1) = l.encoding(self.placeholders['features1'], emb[:,:self.para.input_length,:])
print('h_states1 shape is : ', h_states1.shape)
# lstm 时序特征提取
encoder_init =Encoderlstm(self.para.batch_size,
self.para.hidden_layer,
self.para.hidden_size,
placeholders=self.placeholders)
## [batch, time , hidden size]
(h_states2, c_states2) = encoder_init.encoding(self.placeholders['features2'], emb[:,:self.para.input_length,:])
print('h_states2 shape is : ', h_states2.shape)
h_states=tf.layers.dense(tf.concat([h_states1,h_states2],axis=-1),units=self.para.hidden_size, activation=tf.nn.relu, name='layers')
# this step to predict the pollutant concentration
'''
decoder, return --- for example ,output shape is :(32, 162, 1)
axis=0: bath size
axis=1: numbers of the nodes
axis=2: label size
'''
decoder_init = Dcoderlstm(self.para.batch_size,
self.para.output_length,
self.para.hidden_layer,
self.para.hidden_size,
placeholders=self.placeholders)
self.pres = decoder_init.decoding(h_states, emb[:,self.para.input_length: ,:])
print('pres shape is : ', self.pres.shape)
self.cross_entropy = tf.reduce_mean(
tf.sqrt(tf.reduce_mean(tf.square(self.pres + 1e-10 - self.placeholders['labels']), axis=0)))
print(self.cross_entropy)
print('cross shape is : ',self.cross_entropy.shape)
tf.summary.scalar('cross_entropy',self.cross_entropy)
# backprocess and update the parameters
self.train_op = tf.train.AdamOptimizer(self.para.learning_rate).minimize(self.cross_entropy)
print('#...............................in the training step.....................................#')
def test(self):
'''
:param batch_size: usually use 1
:param encoder_layer:
:param decoder_layer:
:param encoder_nodes:
:param prediction_size:
:param is_training: False
:return:
'''
model_file = tf.train.latest_checkpoint('weights/')
self.saver.restore(self.sess, model_file)
def accuracy(self,label,predict):
'''
:param label: represents the observed value
:param predict: represents the predicted value
:param epoch:
:param steps:
:return:
'''
error = label - predict
average_error = np.mean(np.fabs(error.astype(float)))
print("mae is : %.6f" % (average_error))
rmse_error = np.sqrt(np.mean(np.square(label - predict)))
print("rmse is : %.6f" % (rmse_error))
cor = np.mean(np.multiply((label - np.mean(label)),
(predict - np.mean(predict)))) / (np.std(predict) * np.std(label))
print('correlation coefficient is: %.6f' % (cor))
# mask = label != 0
# mape =np.mean(np.fabs((label[mask] - predict[mask]) / label[mask]))*100.0
# mape=np.mean(np.fabs((label - predict) / label)) * 100.0
# print('mape is: %.6f %' % (mape))
sse = np.sum((label - predict) ** 2)
sst = np.sum((label - np.mean(label)) ** 2)
R2 = 1 - sse / sst # r2_score(y_actual, y_predicted, multioutput='raw_values')
print('r^2 is: %.6f' % (R2))
return average_error,rmse_error,cor,R2
def describe(self,label,predict,prediction_size):
'''
:param label:
:param predict:
:param prediction_size:
:return:
'''
plt.figure()
# Label is observed value,Blue
plt.plot(label, 'b*:', label=u'actual value')
# Predict is predicted value,Red
plt.plot(predict, 'r*:', label=u'predicted value')
# use the legend
# plt.legend()
plt.xlabel("time(hours)", fontsize=17)
plt.ylabel("pm$_{2.5}$ (ug/m$^3$)", fontsize=17)
plt.title("the prediction of pm$_{2.5}", fontsize=17)
plt.show()
def initialize_session(self):
self.sess=tf.Session()
self.saver=tf.train.Saver(var_list=tf.trainable_variables())
def re_current(self, a, max, min):
return [num*(max-min)+min for num in a]
def run_epoch(self):
'''
from now on,the model begin to training, until the epoch to 100
'''
max_mae = 100
self.sess.run(tf.global_variables_initializer())
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(logs_path,graph=tf.get_default_graph())
self.iterate = data_load.DataIterator(site_id=self.para.target_site_id,
site_num=self.para.site_num,
pollutant_id=self.pollutant_id[self.para.pollutant_id],
is_training=self.para.is_training,
time_size=self.para.input_length,
prediction_size=self.para.output_length,
data_divide=self.para.data_divide,
window_step=self.para.step,
normalize=self.para.normalize)
next_elements=self.iterate.next_batch(batch_size=self.para.batch_size, epochs=self.para.epochs,is_training=True)
for i in range(int((self.iterate.train_p.shape[0] - (self.para.input_length + self.para.output_length))//self.para.step)
// self.para.batch_size * self.para.epochs):
x1, x2, m, d, h, label =self.sess.run(next_elements)
features1 = np.reshape(np.array(x1), [-1, self.para.input_length, self.para.site_num, self.para.features1])
features2 = np.reshape(np.array(x2), [-1, self.para.input_length, self.para.features2])
feed_dict = construct_feed_dict(features1, features2, m, d, h, label, self.placeholders)
feed_dict.update({self.placeholders['dropout']: self.para.dropout})
feed_dict.update({self.placeholders['is_training']: self.para.is_training})
summary, loss, _ = self.sess.run((merged,self.cross_entropy,self.train_op), feed_dict=feed_dict)
print("after %d steps,the training average loss value is : %.6f" % (i, loss))
# writer.add_summary(summary, loss)
# validate processing
if i % 50 == 0:
mae_error=self.evaluate()
if max_mae>mae_error:
print("the validate average mae loss value is : %.6f" % (mae_error))
max_mae=mae_error
self.saver.save(self.sess,save_path=self.para.save_path+'model.ckpt')
def evaluate(self):
'''
:param para:
:param pre_model:
:return:
'''
label_list = list()
predict_list = list()
#with tf.Session() as sess:
model_file = tf.train.latest_checkpoint(self.para.save_path)
if not self.para.is_training:
print('the model weights has been loaded:')
self.saver.restore(self.sess, model_file)
self.iterate_test = data_load.DataIterator(site_id=self.para.target_site_id,
site_num=self.para.site_num,
pollutant_id=self.pollutant_id[self.para.pollutant_id],
is_training=self.para.is_training,
time_size=self.para.input_length,
prediction_size=self.para.output_length,
data_divide=self.para.data_divide,
normalize=self.para.normalize)
next_ = self.iterate_test.next_batch(batch_size=self.para.batch_size, epochs=1,is_training=False)
max,min=self.iterate_test.max_p[self.pollutant_id[self.para.pollutant_id]],self.iterate_test.min_p[self.pollutant_id[self.para.pollutant_id]]
for i in range(int((self.iterate_test.test_p.shape[0] -(self.para.input_length+ self.para.output_length))//self.para.output_length)
// self.para.batch_size):
x1, x2, m, d, h, label =self.sess.run(next_)
features1 = np.reshape(np.array(x1), [-1, self.para.input_length, self.para.site_num, self.para.features1])
features2 = np.reshape(np.array(x2), [-1, self.para.input_length, self.para.features2])
feed_dict = construct_feed_dict(features1, features2, m, d, h, label, self.placeholders)
feed_dict.update({self.placeholders['dropout']: 0.0})
feed_dict.update({self.placeholders['is_training']: self.para.is_training})
pre = self.sess.run((self.pres), feed_dict=feed_dict)
label_list.append(label)
predict_list.append(pre)
label_list=np.reshape(np.array(label_list,dtype=np.float32),[-1, self.para.output_length])
predict_list=np.reshape(np.array(predict_list,dtype=np.float32),[-1, self.para.output_length])
if self.para.normalize:
label_list = np.array([self.re_current(row,max,min) for row in label_list])
predict_list = np.array([self.re_current(row,max,min) for row in predict_list])
else:
label_list = np.array([row for row in label_list])
predict_list = np.array([row for row in predict_list])
# np.savetxt('results/results_label.txt',label_list,'%.3f')
# np.savetxt('results/results_predict.txt', predict_list, '%.3f')
label_list=np.reshape(label_list,[-1])
predict_list=np.reshape(predict_list,[-1])
print(label_list)
print(predict_list)
average_error, rmse_error, cor, R2= self.accuracy(label_list, predict_list) #产生预测指标
# self.describe(label_list, predict_list, self.para.output_length) #预测值可视化
return average_error
def main(argv=None):
'''
:param argv:
:return:
'''
print('#......................................beginning........................................#')
para = parameter(argparse.ArgumentParser())
para = para.get_para()
print('Please input a number : 1 or 0. (1 and 0 represents the training or testing, respectively).')
val = input('please input the number : ')
if int(val) == 1:para.is_training = True
else:
para.batch_size=1
para.is_training = False
pre_model = Model(para)
pre_model.initialize_session()
if int(val) == 1:pre_model.run_epoch()
else:
pre_model.evaluate()
print('#...................................finished............................................#')
if __name__ == '__main__':
main()
|
# -- coding: utf-8 --
from __future__ import division
from __future__ import print_function
from spatial_temporal_model.hyparameter import parameter
from spatial_temporal_model.encoder import cnn_lstm
from model.decoder import Dcoderlstm
from model.utils import construct_feed_dict
from model.encoder import Encoderlstm
import pandas as pd
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import model.normalization as normalization
import spatial_temporal_model.process as data_load
import os
import argparse
tf.reset_default_graph()
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
logs_path="board"
def embedding(inputs,
vocab_size,
num_units,
zero_pad=False,
scale=True,
scope="embedding",
reuse=None):
'''Embeds a given tensor.
Args:
inputs: A `Tensor` with type `int32` or `int64` containing the ids
to be looked up in `lookup table`.
vocab_size: An int. Vocabulary size.
num_units: An int. Number of embedding hidden units.
zero_pad: A boolean. If True, all the values of the fist row (id 0)
should be constant zeros.
scale: A boolean. If True. the outputs is multiplied by sqrt num_units.
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
A `Tensor` with one more rank than inputs's. The last dimensionality
should be `num_units`.
For example,
```
import tensorflow as tf
inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3)))
outputs = embedding(inputs, 6, 2, zero_pad=True)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print sess.run(outputs)
>>
[[[ 0. 0. ]
[ 0.09754146 0.67385566]
[ 0.37864095 -0.35689294]]
[[-1.01329422 -1.09939694]
[ 0.7521342 0.38203377]
[-0.04973143 -0.06210355]]]
```
```
import tensorflow as tf
inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3)))
outputs = embedding(inputs, 6, 2, zero_pad=False)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print sess.run(outputs)
>>
[[[-0.19172323 -0.39159766]
[-0.43212751 -0.66207761]
[ 1.03452027 -0.26704335]]
[[-0.11634696 -0.35983452]
[ 0.50208133 0.53509563]
[ 1.22204471 -0.96587461]]]
```
'''
with tf.variable_scope(scope, reuse=reuse):
lookup_table = tf.get_variable('lookup_table',
dtype=tf.float32,
shape=[vocab_size, num_units],
initializer=tf.truncated_normal_initializer(mean=0, stddev=1, seed=0))
if zero_pad:
lookup_table = tf.concat((tf.zeros(shape=[1, num_units]),
lookup_table[1:, :]), 0)
outputs = tf.nn.embedding_lookup(lookup_table, inputs)
if scale:
outputs = outputs * (num_units ** 0.5)
return outputs
class Model(object):
def __init__(self,para):
self.para=para
self.pollutant_id={'AQI':0, 'PM2.5':1,'PM10':3, 'SO2':5, 'NO2':7, 'O3':9, 'CO':13}
# define placeholders
self.placeholders = {
# None : batch _size * time _size
'month': tf.placeholder(tf.int32, shape=(None, self.para.input_length+self.para.output_length), name='input_month'),
'day': tf.placeholder(tf.int32, shape=(None, self.para.input_length+self.para.output_length), name='input_day'),
'hour': tf.placeholder(tf.int32, shape=(None, self.para.input_length+self.para.output_length), name='input_hour'),
'features1': tf.placeholder(tf.float32, shape=[None, self.para.input_length, self.para.site_num, self.para.features1],name='input_1'),
'features2': tf.placeholder(tf.float32, shape=[None, self.para.input_length, self.para.features2], name='input_2'),
'labels': tf.placeholder(tf.float32, shape=[None, self.para.output_length]),
'dropout': tf.placeholder_with_default(0., shape=()),
'is_training': tf.placeholder(tf.bool, shape=(),name='input_is_training'),
}
self.model()
def model(self):
'''
:param batch_size: 64
:param encoder_layer:
:param decoder_layer:
:param encoder_nodes:
:param prediction_size:
:param is_training: True
:return:
'''
with tf.variable_scope('month'):
self.m_emb = embedding(self.placeholders['month'], vocab_size=13, num_units=self.para.hidden_size,
scale=False, scope="month_embed")
print('d_emd shape is : ', self.m_emb.shape)
with tf.variable_scope('day'):
self.d_emb = embedding(self.placeholders['day'], vocab_size=32, num_units=self.para.hidden_size,
scale=False, scope="day_embed")
print('d_emd shape is : ', self.d_emb.shape)
with tf.variable_scope('hour'):
self.h_emb = embedding(self.placeholders['hour'], vocab_size=24, num_units=self.para.hidden_size,
scale=False, scope="hour_embed")
print('h_emd shape is : ', self.h_emb.shape)
# create model
# this step use to encoding the input series data
'''
rlstm, return --- for example ,output shape is :(32, 3, 128)
axis=0: bath size
axis=1: input data time size
axis=2: output feature size
'''
# shape is [batch, input length, embedding size]
emb=tf.add_n([self.m_emb,self.d_emb,self.h_emb])
# cnn时空特征提取
l = cnn_lstm(batch_size=self.para.batch_size,
layer_num=self.para.hidden_layer,
nodes=self.para.hidden_size,
highth=self.para.h,
width=self.para.w,
placeholders=self.placeholders)
# [batch, time ,hidden size]
(h_states1, c_states1) = l.encoding(self.placeholders['features1'], emb[:,:self.para.input_length,:])
print('h_states1 shape is : ', h_states1.shape)
# lstm 时序特征提取
encoder_init =Encoderlstm(self.para.batch_size,
self.para.hidden_layer,
self.para.hidden_size,
placeholders=self.placeholders)
## [batch, time , hidden size]
(h_states2, c_states2) = encoder_init.encoding(self.placeholders['features2'], emb[:,:self.para.input_length,:])
print('h_states2 shape is : ', h_states2.shape)
h_states=tf.layers.dense(tf.concat([h_states1,h_states2],axis=-1),units=self.para.hidden_size, activation=tf.nn.relu, name='layers')
# this step to predict the pollutant concentration
'''
decoder, return --- for example ,output shape is :(32, 162, 1)
axis=0: bath size
axis=1: numbers of the nodes
axis=2: label size
'''
decoder_init = Dcoderlstm(self.para.batch_size,
self.para.output_length,
self.para.hidden_layer,
self.para.hidden_size,
placeholders=self.placeholders)
self.pres = decoder_init.decoding(h_states, emb[:,self.para.input_length: ,:])
print('pres shape is : ', self.pres.shape)
self.cross_entropy = tf.reduce_mean(
tf.sqrt(tf.reduce_mean(tf.square(self.pres + 1e-10 - self.placeholders['labels']), axis=0)))
print(self.cross_entropy)
print('cross shape is : ',self.cross_entropy.shape)
tf.summary.scalar('cross_entropy',self.cross_entropy)
# backprocess and update the parameters
self.train_op = tf.train.AdamOptimizer(self.para.learning_rate).minimize(self.cross_entropy)
print('#...............................in the training step.....................................#')
def test(self):
'''
:param batch_size: usually use 1
:param encoder_layer:
:param decoder_layer:
:param encoder_nodes:
:param prediction_size:
:param is_training: False
:return:
'''
model_file = tf.train.latest_checkpoint('weights/')
self.saver.restore(self.sess, model_file)
def accuracy(self,label,predict):
'''
:param label: represents the observed value
:param predict: represents the predicted value
:param epoch:
:param steps:
:return:
'''
error = label - predict
average_error = np.mean(np.fabs(error.astype(float)))
print("mae is : %.6f" % (average_error))
rmse_error = np.sqrt(np.mean(np.square(label - predict)))
print("rmse is : %.6f" % (rmse_error))
cor = np.mean(np.multiply((label - np.mean(label)),
(predict - np.mean(predict)))) / (np.std(predict) * np.std(label))
print('correlation coefficient is: %.6f' % (cor))
# mask = label != 0
# mape =np.mean(np.fabs((label[mask] - predict[mask]) / label[mask]))*100.0
# mape=np.mean(np.fabs((label - predict) / label)) * 100.0
# print('mape is: %.6f %' % (mape))
sse = np.sum((label - predict) ** 2)
sst = np.sum((label - np.mean(label)) ** 2)
R2 = 1 - sse / sst # r2_score(y_actual, y_predicted, multioutput='raw_values')
print('r^2 is: %.6f' % (R2))
return average_error,rmse_error,cor,R2
def describe(self,label,predict,prediction_size):
'''
:param label:
:param predict:
:param prediction_size:
:return:
'''
plt.figure()
# Label is observed value,Blue
plt.plot(label, 'b*:', label=u'actual value')
# Predict is predicted value,Red
plt.plot(predict, 'r*:', label=u'predicted value')
# use the legend
# plt.legend()
plt.xlabel("time(hours)", fontsize=17)
plt.ylabel("pm$_{2.5}$ (ug/m$^3$)", fontsize=17)
plt.title("the prediction of pm$_{2.5}", fontsize=17)
plt.show()
def initialize_session(self):
self.sess=tf.Session()
self.saver=tf.train.Saver(var_list=tf.trainable_variables())
def re_current(self, a, max, min):
return [num*(max-min)+min for num in a]
def run_epoch(self):
'''
from now on,the model begin to training, until the epoch to 100
'''
max_mae = 100
self.sess.run(tf.global_variables_initializer())
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(logs_path,graph=tf.get_default_graph())
self.iterate = data_load.DataIterator(site_id=self.para.target_site_id,
site_num=self.para.site_num,
pollutant_id=self.pollutant_id[self.para.pollutant_id],
is_training=self.para.is_training,
time_size=self.para.input_length,
prediction_size=self.para.output_length,
data_divide=self.para.data_divide,
window_step=self.para.step,
normalize=self.para.normalize)
next_elements=self.iterate.next_batch(batch_size=self.para.batch_size, epochs=self.para.epochs,is_training=True)
for i in range(int((self.iterate.train_p.shape[0] - (self.para.input_length + self.para.output_length))//self.para.step)
// self.para.batch_size * self.para.epochs):
x1, x2, m, d, h, label =self.sess.run(next_elements)
features1 = np.reshape(np.array(x1), [-1, self.para.input_length, self.para.site_num, self.para.features1])
features2 = np.reshape(np.array(x2), [-1, self.para.input_length, self.para.features2])
feed_dict = construct_feed_dict(features1, features2, m, d, h, label, self.placeholders)
feed_dict.update({self.placeholders['dropout']: self.para.dropout})
feed_dict.update({self.placeholders['is_training']: self.para.is_training})
summary, loss, _ = self.sess.run((merged,self.cross_entropy,self.train_op), feed_dict=feed_dict)
print("after %d steps,the training average loss value is : %.6f" % (i, loss))
# writer.add_summary(summary, loss)
# validate processing
if i % 50 == 0:
mae_error=self.evaluate()
if max_mae>mae_error:
print("the validate average mae loss value is : %.6f" % (mae_error))
max_mae=mae_error
self.saver.save(self.sess,save_path=self.para.save_path+'model.ckpt')
def evaluate(self):
'''
:param para:
:param pre_model:
:return:
'''
label_list = list()
predict_list = list()
#with tf.Session() as sess:
model_file = tf.train.latest_checkpoint(self.para.save_path)
if not self.para.is_training:
print('the model weights has been loaded:')
self.saver.restore(self.sess, model_file)
self.iterate_test = data_load.DataIterator(site_id=self.para.target_site_id,
site_num=self.para.site_num,
pollutant_id=self.pollutant_id[self.para.pollutant_id],
is_training=self.para.is_training,
time_size=self.para.input_length,
prediction_size=self.para.output_length,
data_divide=self.para.data_divide,
normalize=self.para.normalize)
next_ = self.iterate_test.next_batch(batch_size=self.para.batch_size, epochs=1,is_training=False)
max,min=self.iterate_test.max_p[self.pollutant_id[self.para.pollutant_id]],self.iterate_test.min_p[self.pollutant_id[self.para.pollutant_id]]
for i in range(int((self.iterate_test.test_p.shape[0] -(self.para.input_length+ self.para.output_length))//self.para.output_length)
// self.para.batch_size):
x1, x2, m, d, h, label =self.sess.run(next_)
features1 = np.reshape(np.array(x1), [-1, self.para.input_length, self.para.site_num, self.para.features1])
features2 = np.reshape(np.array(x2), [-1, self.para.input_length, self.para.features2])
feed_dict = construct_feed_dict(features1, features2, m, d, h, label, self.placeholders)
feed_dict.update({self.placeholders['dropout']: 0.0})
feed_dict.update({self.placeholders['is_training']: self.para.is_training})
pre = self.sess.run((self.pres), feed_dict=feed_dict)
label_list.append(label)
predict_list.append(pre)
label_list=np.reshape(np.array(label_list,dtype=np.float32),[-1, self.para.output_length])
predict_list=np.reshape(np.array(predict_list,dtype=np.float32),[-1, self.para.output_length])
if self.para.normalize:
label_list = np.array([self.re_current(row,max,min) for row in label_list])
predict_list = np.array([self.re_current(row,max,min) for row in predict_list])
else:
label_list = np.array([row for row in label_list])
predict_list = np.array([row for row in predict_list])
# np.savetxt('results/results_label.txt',label_list,'%.3f')
# np.savetxt('results/results_predict.txt', predict_list, '%.3f')
label_list=np.reshape(label_list,[-1])
predict_list=np.reshape(predict_list,[-1])
print(label_list)
print(predict_list)
average_error, rmse_error, cor, R2= self.accuracy(label_list, predict_list) #产生预测指标
# self.describe(label_list, predict_list, self.para.output_length) #预测值可视化
return average_error
def main(argv=None):
'''
:param argv:
:return:
'''
print('#......................................beginning........................................#')
para = parameter(argparse.ArgumentParser())
para = para.get_para()
print('Please input a number : 1 or 0. (1 and 0 represents the training or testing, respectively).')
val = input('please input the number : ')
if int(val) == 1:para.is_training = True
else:
para.batch_size=1
para.is_training = False
pre_model = Model(para)
pre_model.initialize_session()
if int(val) == 1:pre_model.run_epoch()
else:
pre_model.evaluate()
print('#...................................finished............................................#')
if __name__ == '__main__':
main()
|
en
| 0.516015
|
# -- coding: utf-8 -- Embeds a given tensor. Args: inputs: A `Tensor` with type `int32` or `int64` containing the ids to be looked up in `lookup table`. vocab_size: An int. Vocabulary size. num_units: An int. Number of embedding hidden units. zero_pad: A boolean. If True, all the values of the fist row (id 0) should be constant zeros. scale: A boolean. If True. the outputs is multiplied by sqrt num_units. scope: Optional scope for `variable_scope`. reuse: Boolean, whether to reuse the weights of a previous layer by the same name. Returns: A `Tensor` with one more rank than inputs's. The last dimensionality should be `num_units`. For example, ``` import tensorflow as tf inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3))) outputs = embedding(inputs, 6, 2, zero_pad=True) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) print sess.run(outputs) >> [[[ 0. 0. ] [ 0.09754146 0.67385566] [ 0.37864095 -0.35689294]] [[-1.01329422 -1.09939694] [ 0.7521342 0.38203377] [-0.04973143 -0.06210355]]] ``` ``` import tensorflow as tf inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3))) outputs = embedding(inputs, 6, 2, zero_pad=False) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) print sess.run(outputs) >> [[[-0.19172323 -0.39159766] [-0.43212751 -0.66207761] [ 1.03452027 -0.26704335]] [[-0.11634696 -0.35983452] [ 0.50208133 0.53509563] [ 1.22204471 -0.96587461]]] ``` # define placeholders # None : batch _size * time _size :param batch_size: 64 :param encoder_layer: :param decoder_layer: :param encoder_nodes: :param prediction_size: :param is_training: True :return: # create model # this step use to encoding the input series data rlstm, return --- for example ,output shape is :(32, 3, 128) axis=0: bath size axis=1: input data time size axis=2: output feature size # shape is [batch, input length, embedding size] # cnn时空特征提取 # [batch, time ,hidden size] # lstm 时序特征提取 ## [batch, time , hidden size] # this step to predict the pollutant 
concentration decoder, return --- for example ,output shape is :(32, 162, 1) axis=0: bath size axis=1: numbers of the nodes axis=2: label size # backprocess and update the parameters #') :param batch_size: usually use 1 :param encoder_layer: :param decoder_layer: :param encoder_nodes: :param prediction_size: :param is_training: False :return: :param label: represents the observed value :param predict: represents the predicted value :param epoch: :param steps: :return: # mask = label != 0 # mape =np.mean(np.fabs((label[mask] - predict[mask]) / label[mask]))*100.0 # mape=np.mean(np.fabs((label - predict) / label)) * 100.0 # print('mape is: %.6f %' % (mape)) # r2_score(y_actual, y_predicted, multioutput='raw_values') :param label: :param predict: :param prediction_size: :return: # Label is observed value,Blue # Predict is predicted value,Red # use the legend # plt.legend() from now on,the model begin to training, until the epoch to 100 # writer.add_summary(summary, loss) # validate processing :param para: :param pre_model: :return: #with tf.Session() as sess: # np.savetxt('results/results_label.txt',label_list,'%.3f') # np.savetxt('results/results_predict.txt', predict_list, '%.3f') #产生预测指标 # self.describe(label_list, predict_list, self.para.output_length) #预测值可视化 :param argv: :return: #') #')
| 2.536348
| 3
|
dbutils/convert.py
|
libremente/service-app
| 2
|
6625628
|
import json
import argparse
"""
This script is usefull to convert json exported with datagrip
in json importable with insertMany
Usage
python3 convert.py file.name.json
"""
parser = argparse.ArgumentParser()
parser.add_argument("fname")
args = parser.parse_args()
with open(args.fname, 'r', encoding='utf-8') as infile:
data = json.load(infile)
for row in data:
row.pop("_id")
v = row['create_datetime']["$date"]
row['create_datetime'] = v
v1 = row['update_datetime']["$date"]
row['update_datetime'] = v1
with open(args.fname, 'w', encoding='utf-8') as outfile:
json.dump(data, outfile, ensure_ascii=False)
|
import json
import argparse
"""
This script is usefull to convert json exported with datagrip
in json importable with insertMany
Usage
python3 convert.py file.name.json
"""
parser = argparse.ArgumentParser()
parser.add_argument("fname")
args = parser.parse_args()
with open(args.fname, 'r', encoding='utf-8') as infile:
data = json.load(infile)
for row in data:
row.pop("_id")
v = row['create_datetime']["$date"]
row['create_datetime'] = v
v1 = row['update_datetime']["$date"]
row['update_datetime'] = v1
with open(args.fname, 'w', encoding='utf-8') as outfile:
json.dump(data, outfile, ensure_ascii=False)
|
en
| 0.576708
|
This script is usefull to convert json exported with datagrip in json importable with insertMany Usage python3 convert.py file.name.json
| 3.119334
| 3
|
src/sv-pipeline/03_variant_filtering/scripts/rewrite_SR_coords.py
|
shyamrav/gatk-sv
| 1
|
6625629
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""
"""
import argparse
import sys
import pysam
import pandas as pd
def rewrite_SR_coords(record, metrics, pval_cutoff, bg_cutoff):
row = metrics.loc[record.id]
if row.SR_sum_log_pval >= pval_cutoff and row.SR_sum_bg_frac >= bg_cutoff:
record.pos = int(row.SR_posA_pos)
record.stop = int(row.SR_posB_pos)
if record.info['SVTYPE'] == 'INV':
record.pos, record.stop = sorted([record.pos, record.stop])
if record.info['SVTYPE'] not in 'INS BND'.split():
record.info['SVLEN'] = record.stop - record.pos
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('vcf')
parser.add_argument('metrics')
parser.add_argument('cutoffs')
parser.add_argument('fout')
args = parser.parse_args()
vcf = pysam.VariantFile(args.vcf)
if args.fout in '- stdout'.split():
fout = pysam.VariantFile(sys.stdout, 'w', header=vcf.header)
else:
fout = pysam.VariantFile(args.fout, 'w', header=vcf.header)
# Load metrics
metrics = pd.read_table(args.metrics).drop_duplicates()
records = [r for r in vcf]
IDs = [r.id for r in records]
metrics = metrics.loc[metrics.name.isin(IDs)].copy()
metrics = metrics.set_index('name')
# Load cutoffs
cutoffs = pd.read_table(args.cutoffs)
pval_cutoff = cutoffs.loc[(cutoffs['test'] == 'SR1') &
(cutoffs['metric'] == 'SR_sum_log_pval'), 'cutoff'].iloc[0]
bg_cutoff = cutoffs.loc[(cutoffs['test'] == 'SR1') &
(cutoffs['metric'] == 'SR_sum_bg_frac'), 'cutoff'].iloc[0]
for record in records:
rewrite_SR_coords(record, metrics, pval_cutoff, bg_cutoff)
if record.info['SVTYPE'] in 'DEL DUP'.split():
if record.info['SVLEN'] < 50:
continue
fout.write(record)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""
"""
import argparse
import sys
import pysam
import pandas as pd
def rewrite_SR_coords(record, metrics, pval_cutoff, bg_cutoff):
row = metrics.loc[record.id]
if row.SR_sum_log_pval >= pval_cutoff and row.SR_sum_bg_frac >= bg_cutoff:
record.pos = int(row.SR_posA_pos)
record.stop = int(row.SR_posB_pos)
if record.info['SVTYPE'] == 'INV':
record.pos, record.stop = sorted([record.pos, record.stop])
if record.info['SVTYPE'] not in 'INS BND'.split():
record.info['SVLEN'] = record.stop - record.pos
def main():
    """CLI entry point: rewrite SR-refined coordinates into a VCF.

    Positional args: <vcf> <metrics table> <cutoffs table> <out VCF or '-'>.
    DEL/DUP records whose (possibly recomputed) SVLEN falls below 50 bp
    are dropped from the output.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('vcf')
    parser.add_argument('metrics')
    parser.add_argument('cutoffs')
    parser.add_argument('fout')
    args = parser.parse_args()
    vcf = pysam.VariantFile(args.vcf)
    # '-' or 'stdout' selects standard output for the rewritten VCF.
    if args.fout in '- stdout'.split():
        fout = pysam.VariantFile(sys.stdout, 'w', header=vcf.header)
    else:
        fout = pysam.VariantFile(args.fout, 'w', header=vcf.header)
    # Load metrics, keeping only rows whose name matches a VCF record ID,
    # indexed by name for the per-record lookups in rewrite_SR_coords.
    metrics = pd.read_table(args.metrics).drop_duplicates()
    records = [r for r in vcf]
    IDs = [r.id for r in records]
    metrics = metrics.loc[metrics.name.isin(IDs)].copy()
    metrics = metrics.set_index('name')
    # Load the two SR1-test cutoffs used by rewrite_SR_coords.
    cutoffs = pd.read_table(args.cutoffs)
    pval_cutoff = cutoffs.loc[(cutoffs['test'] == 'SR1') &
                              (cutoffs['metric'] == 'SR_sum_log_pval'), 'cutoff'].iloc[0]
    bg_cutoff = cutoffs.loc[(cutoffs['test'] == 'SR1') &
                            (cutoffs['metric'] == 'SR_sum_bg_frac'), 'cutoff'].iloc[0]
    for record in records:
        rewrite_SR_coords(record, metrics, pval_cutoff, bg_cutoff)
        # Skip (do not write) DEL/DUP calls shorter than 50 bp.
        if record.info['SVTYPE'] in 'DEL DUP'.split():
            if record.info['SVLEN'] < 50:
                continue
        fout.write(record)
if __name__ == '__main__':
    main()
|
en
| 0.430149
|
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Load metrics # Load cutoffs
| 2.325112
| 2
|
utils/metrics.py
|
IvoOVerhoeven/meta-learning-emotion-detection
| 3
|
6625630
|
<gh_stars>1-10
import torch
import torch.nn
import torch.nn.functional as F
def logits_to_preds(logits):
    """Convert raw class logits to hard class predictions.

    Softmax is taken over the last dimension, then the most probable
    class index is selected along dimension 1.
    """
    class_probs = F.softmax(logits, dim=-1)
    return torch.argmax(class_probs, dim=1)
def accuracy(preds, labels):
    """Fraction of predictions equal to their labels, as a 0-d float tensor."""
    correct = (preds == labels).float()
    return correct.mean()
def confusion_matrix(preds, labels, n_classes):
    """Build an ``n_classes`` x ``n_classes`` confusion matrix.

    ``conf_mat[i, j]`` counts samples predicted as class ``i`` whose true
    label is ``j`` (rows = predictions, columns = labels).

    Vectorized with ``torch.bincount`` over flattened (pred, label) pairs
    instead of a Python loop over individual samples.  Assumes all values
    in ``preds`` and ``labels`` lie in ``[0, n_classes)`` (the original
    loop had the same implicit requirement).
    """
    flat_indices = preds.long() * n_classes + labels.long()
    counts = torch.bincount(flat_indices, minlength=n_classes * n_classes)
    # Match the original float dtype (torch.zeros default).
    return counts.reshape(n_classes, n_classes).float()
def precision(conf_mat):
    """Per-class precision: diagonal over row sums (predicted-class totals).

    Classes that were never predicted divide by zero; ``nan_to_num``
    maps the resulting NaNs to 0.
    """
    predicted_totals = conf_mat.sum(dim=1)
    per_class = conf_mat.diagonal() / predicted_totals
    return torch.nan_to_num(per_class)
def recall(conf_mat):
    """Per-class recall: diagonal over column sums (true-label totals).

    Classes absent from the labels divide by zero; ``nan_to_num`` maps
    the resulting NaNs to 0.
    """
    label_totals = conf_mat.sum(dim=0)
    per_class = conf_mat.diagonal() / label_totals
    return torch.nan_to_num(per_class)
def f1(conf_mat):
    """Per-class F1 score: harmonic mean of per-class precision and recall.

    Precision (diag / row sums) and recall (diag / column sums) are
    computed inline; every divide-by-zero NaN is mapped to 0 via
    ``nan_to_num``, exactly as in the standalone helpers.
    """
    diag = torch.diagonal(conf_mat)
    pre = torch.nan_to_num(diag / torch.sum(conf_mat, dim=1))
    rec = torch.nan_to_num(diag / torch.sum(conf_mat, dim=0))
    return torch.nan_to_num(2 * (pre * rec) / (pre + rec))
def logging_metrics(logits, labels):
    """Compute scalar metrics for logging: accuracy and macro-averaged F1.

    Returns a dict of plain Python floats under keys 'acc' and 'f1'.
    """
    preds = logits_to_preds(logits)
    n_classes = logits.size(-1)
    conf_mat = confusion_matrix(preds, labels, n_classes)
    macro_f1 = f1(conf_mat).mean()
    return {'acc': accuracy(preds, labels).cpu().item(),
            'f1': macro_f1.cpu().item()}
|
import torch
import torch.nn
import torch.nn.functional as F
def logits_to_preds(logits):
    """Softmax over the last dim, then argmax along dim 1 -> class indices."""
    probs = F.softmax(logits, dim=-1)
    preds = torch.argmax(probs, dim=1)
    return preds
def accuracy(preds, labels):
    """Fraction of matching predictions, as a 0-d float tensor."""
    return (preds == labels).float().mean()
def confusion_matrix(preds, labels, n_classes):
    """conf_mat[i, j] counts samples predicted i whose true label is j."""
    conf_mat = torch.zeros(n_classes, n_classes)
    for i, j in zip(preds, labels):
        conf_mat[i, j] += 1
    return conf_mat
def precision(conf_mat):
    """Per-class precision (diag / row sums); NaN from 0-division -> 0."""
    return torch.nan_to_num(torch.diagonal(conf_mat) / torch.sum(conf_mat, dim=1))
def recall(conf_mat):
    """Per-class recall (diag / column sums); NaN from 0-division -> 0."""
    return torch.nan_to_num(torch.diagonal(conf_mat) / torch.sum(conf_mat, dim=0))
def f1(conf_mat):
    """Per-class F1 = harmonic mean of precision and recall; 0 where undefined."""
    pre = precision(conf_mat)
    rec = recall(conf_mat)
    return torch.nan_to_num(2 * (pre * rec) / (pre + rec))
def logging_metrics(logits, labels):
    """Return {'acc': float, 'f1': float} (macro-F1) for one batch."""
    preds = logits_to_preds(logits)
    acc = accuracy(preds, labels)
    conf_mat = confusion_matrix(preds, labels, logits.size(-1))
    f1_macro = torch.mean(f1(conf_mat))
    return {'acc': acc.cpu().item(), 'f1': f1_macro.cpu().item()}
|
none
| 1
| 2.489966
| 2
|
|
containers/ipython.py
|
colaboratory-team/backend-container
| 4
|
6625631
|
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IPython configuration for Colab."""
c = get_config() # pylint:disable=undefined-variable
# Register a custom kernel_class.
c.IPKernelApp.kernel_class = 'google.colab._kernel.Kernel'
# Implicitly imported packages.
c.InteractiveShellApp.extensions = [
'google.cloud.bigquery',
'matplotlib',
'seaborn',
]
ENABLE_ALTAIR = """
import altair
try:
altair.renderers.enable('colab')
finally:
del altair
"""
# Startup code.
c.InteractiveShellApp.exec_lines = [
'from google.colab import _shell_customizations',
'_shell_customizations.initialize()',
# TODO(b/72409705): Remove this extra import.
'import h5py',
ENABLE_ALTAIR,
]
# Enable matplotlib renderings to show up inline in the notebook.
c.InteractiveShellApp.matplotlib = 'inline'
|
# Copyright 2017 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IPython configuration for Colab."""
c = get_config() # pylint:disable=undefined-variable
# Register a custom kernel_class.
c.IPKernelApp.kernel_class = 'google.colab._kernel.Kernel'
# Implicitly imported packages.
c.InteractiveShellApp.extensions = [
'google.cloud.bigquery',
'matplotlib',
'seaborn',
]
ENABLE_ALTAIR = """
import altair
try:
altair.renderers.enable('colab')
finally:
del altair
"""
# Startup code.
c.InteractiveShellApp.exec_lines = [
'from google.colab import _shell_customizations',
'_shell_customizations.initialize()',
# TODO(b/72409705): Remove this extra import.
'import h5py',
ENABLE_ALTAIR,
]
# Enable matplotlib renderings to show up inline in the notebook.
c.InteractiveShellApp.matplotlib = 'inline'
|
en
| 0.745327
|
# Copyright 2017 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. IPython configuration for Colab. # pylint:disable=undefined-variable # Register a custom kernel_class. # Implicitly imported packages. import altair try: altair.renderers.enable('colab') finally: del altair # Startup code. # TODO(b/72409705): Remove this extra import. # Enable matplotlib renderings to show up inline in the notebook.
| 2.120105
| 2
|
examples/singleobjective/CEC2013/diversity_de_cec2013.py
|
pedronarloch/jMetalPy_phD
| 0
|
6625632
|
import os
import sys
import pandas as pd
import seaborn as sns
import jmetal.analysis.plot as jPlot
from jmetal.algorithm.singleobjective.differential_evolution import DifferentialEvolutionReset
from jmetal.algorithm.singleobjective.simulated_annealing import SimulatedAnnealing
from jmetal.operator import PolynomialMutation
from jmetal.operator.boundary_correction import RandomCorrection
from jmetal.operator.crossover import DifferentialEvolutionCrossover
from jmetal.operator.selection import DifferentialEvolutionBestMinSelection, DifferentialEvolutionSelection
from jmetal.problem.singleobjective.CEC2013_pygmo import CEC2013
from jmetal.util.observer import MechanismProbabilitiesObserver
from jmetal.util.observer import PrintObjectivesObserver, DimensionWiseDiversityObserver, PhenotypicDiversityObserver
from jmetal.util.solutions.generator import ArchiveInjector
from jmetal.util.termination_criterion import StoppingByEvaluations
def create_data_frames(objective_dict, gdm_dict, xpl_dict, xpt_dict, solution_dict):
    """Convert per-run result dicts into column-per-run DataFrames.

    Each input maps run labels (e.g. 'run-1') to sequences of per-generation
    values; each output DataFrame has one column per run.

    Returns:
        Tuple of DataFrames in the order (objective, gdm, xpt, xpl,
        solutions) -- note xpt before xpl, matching the original interface.
    """
    def _runs_as_columns(run_dict):
        # from_dict(orient='index') yields one row per run; transposing
        # turns each run into a column with generations as the row index.
        return pd.DataFrame.from_dict(run_dict, orient='index').transpose()

    df_objective = _runs_as_columns(objective_dict)
    df_gdm = _runs_as_columns(gdm_dict)
    df_xpl = _runs_as_columns(xpl_dict)
    df_xpt = _runs_as_columns(xpt_dict)
    df_solutions = _runs_as_columns(solution_dict)
    return df_objective, df_gdm, df_xpt, df_xpl, df_solutions
def run_algorithm(trials, algorithm):
    """Run *algorithm* *trials* times and collect per-run histories.

    Relies on the module-level ``termination_criteria`` created in the
    __main__ block below.  Observers are registered once and then reset
    between trials by re-calling their ``__init__`` -- this clears their
    histories while keeping the same registered objects.

    Returns a 10-tuple of dicts keyed by 'run-<n>': (energy, diversity,
    dwd_xpl, dwd_xpt, final_solution, explore, exploit, do_nothing,
    diversification_counter, intensification_counter).
    NOTE(review): ``diversity`` is returned but never populated here --
    confirm whether callers expect it empty.
    """
    prob_observer = MechanismProbabilitiesObserver(termination_criteria.get_criterion())
    obj_observer = PrintObjectivesObserver(frequency=algorithm.population_size)
    dwd_observer = DimensionWiseDiversityObserver(termination_criteria.get_criterion())
    ph_observer = PhenotypicDiversityObserver(termination_criteria.get_criterion())
    algorithm.observable.register(prob_observer)
    algorithm.observable.register(obj_observer)
    algorithm.observable.register(dwd_observer)
    algorithm.observable.register(ph_observer)
    diversity = {}
    energy = {}
    dwd_xpl = {}
    dwd_xpt = {}
    final_solution = {}
    explore = {}
    exploit = {}
    do_nothing = {}
    diversification_counter = {}
    intensification_counter = {}
    for i in range(trials):
        # Reset observer state for this trial without re-registering.
        prob_observer.__init__(termination_criteria.get_criterion())
        obj_observer.__init__(frequency=algorithm.population_size)
        dwd_observer.__init__(termination_criteria.get_criterion())
        ph_observer.__init__(termination_criteria.get_criterion())
        #file_path = '/home/pnarloch/workspace/resources/populations/CEC2013/function_' + str(p) + '/population_' \
        #            + str(i) + '.txt'
        # population_generator = ArchiveInjector(file_path=file_path, problem=problem)
        # algorithm.population_generator = population_generator
        algorithm.run()
        idx = 'run-' + str(i + 1)
        energy[idx] = obj_observer.fitness_history
        dwd_xpl[idx] = dwd_observer.xpl_history
        dwd_xpt[idx] = dwd_observer.xpt_history
        final_solution[idx] = algorithm.get_best_solution().variables
        diversification_counter[idx] = algorithm.diversification_count
        intensification_counter[idx] = algorithm.intensification_count
        explore[idx] = prob_observer.diversfication_history  # sic: attribute name as defined upstream
        exploit[idx] = prob_observer.intensification_history
        do_nothing[idx] = prob_observer.do_nothing_history
    return energy, diversity, dwd_xpl, dwd_xpt, final_solution, explore, exploit, do_nothing, \
        diversification_counter, intensification_counter
if __name__ == '__main__':
    sns.set(style="darkgrid")
    # Problem selection: benchmark function index from argv, fixed D=10.
    p = int(sys.argv[1])
    d = 10
    problem = CEC2013(p, d)
    trials = 10
    # Evaluation budget scales with dimensionality: 10000 * D.
    max_evaluations = d * 10000
    pop_size = 100
    termination_criteria = StoppingByEvaluations(max_evaluations)
    selection_operator = DifferentialEvolutionSelection()
    # NOTE(review): best_selection_operator and local_search_algorithm are
    # constructed but never attached to `algorithm` -- apparently unused.
    best_selection_operator = DifferentialEvolutionBestMinSelection()
    crossover_operator = DifferentialEvolutionCrossover(0.9, 0.5, 0.0)
    local_search_algorithm = SimulatedAnnealing(
        problem=problem,
        mutation=PolynomialMutation(probability=0.1, distribution_index=20.0),
        termination_criterion=StoppingByEvaluations(max=500)
    )
    algorithm = DifferentialEvolutionReset(problem=problem,
                                           population_size=pop_size,
                                           termination_criterion=termination_criteria)
    algorithm.selection_operator = selection_operator
    algorithm.crossover_operator = crossover_operator
    algorithm.crossover_operator.boundary_correction = RandomCorrection()
    # Random Trials
    fitness_indexes, diversity_indexes, xpl, xpt, solutions, explore, exploit, do_nothing, \
        diversification_counter, intensification_counter = run_algorithm(trials,
                                                                         algorithm)
    df_fitness, df_gdm, df_xpt, df_xpl, df_solutions = create_data_frames(fitness_indexes,
                                                                          diversity_indexes,
                                                                          xpl_dict=xpl,
                                                                          xpt_dict=xpt,
                                                                          solution_dict=solutions)
    # One column per run for each probability/counter history.
    df_explore = pd.DataFrame.from_dict(explore, orient="index")
    df_explore = df_explore.transpose()
    df_exploit = pd.DataFrame.from_dict(exploit, orient="index")
    df_exploit = df_exploit.transpose()
    df_do_nothing = pd.DataFrame.from_dict(do_nothing, orient="index")
    df_do_nothing = df_do_nothing.transpose()
    # NOTE(review): df_intensification is built but never written out; the
    # intensification CSV below reads `.intensification` off
    # df_diversification instead -- likely a copy/paste slip.
    df_intensification = pd.DataFrame.from_dict(intensification_counter, orient='index')
    df_intensification = df_intensification.transpose()
    df_diversification = pd.DataFrame.from_dict(diversification_counter, orient='index')
    # NOTE(review): `.diversification` attribute access expects a column of
    # that name, but the frame's columns are 'run-<n>' labels -- this line
    # (and `.intensification` below) likely raises AttributeError. Confirm.
    df_diversification = df_diversification.diversification.transpose()
    # NOTE(review): path says CEC2014 while the problem suite is CEC2013.
    result_path = '/home/pnarloch/workspace/CEC2014/f_' + str(p) + '/'
    if not os.path.exists(result_path):
        os.mkdir(result_path)
    plot_file = result_path + problem.get_name() + '_D' + str(d) + '_' \
        + algorithm.selection_operator.get_name() + '_' + algorithm.get_name() + "_SA"
    df_fitness.to_csv(plot_file + '_fitness.csv')
    df_xpt.to_csv(plot_file + '_xpt.csv')
    df_xpl.to_csv(plot_file + '_xpl.csv')
    df_gdm.to_csv(plot_file + '_gdm.csv')
    df_solutions.to_csv(plot_file + '_solutions.csv')
    df_explore.to_csv(plot_file + '_explore.csv')
    df_exploit.to_csv(plot_file + '_exploit.csv')
    df_do_nothing.to_csv(plot_file + '_nothing.csv')
    df_diversification.to_csv(plot_file + '_diversification.csv')
    df_diversification.intensification.to_csv(plot_file + '_intensification.csv')
    ''' Plotting Energies + Mean '''
    jMetalPlot = jPlot.LineChart(df_fitness, "Fitness " + problem.get_name(), "Generations",
                                 "Fitness", plot_file + "_fitness.png")
    jMetalPlot.plot()
    jMetalPlot.save_plot()
    ''' Plotting XPL/XPT/GDM for each run '''
    # One diversity chart per run: GDM together with the XPL/XPT histories.
    for col in df_gdm:
        _df = pd.DataFrame()
        _df['GDM'] = df_gdm[col]
        _df['XPL'] = df_xpl[col]
        _df['XPT'] = df_xpt[col]
        # NOTE(review): re-calling __init__ reuses one chart object per
        # figure; relies on LineChart tolerating re-initialization.
        jMetalPlot.__init__(_df, "Diversity " + problem.get_name() + ' - ' + col, 'Generations',
                            'Diversity Index', plot_file + '_diversity_' + col + '.png')
        jMetalPlot.plot()
        jMetalPlot.save_plot()
    for col in df_explore:
        _df = pd.DataFrame()
        _df['Explore'] = df_explore[col]
        _df['Exploit'] = df_exploit[col]
        _df['Nothing'] = df_do_nothing[col]
        jMetalPlot.__init__(_df, "Strategy Probabilities " + problem.get_name() + ' - ' + col, 'Generation',
                            'Probabilities', plot_file + '_strategy_probability_' + col + '.png')
        jMetalPlot.plot()
        jMetalPlot.save_plot()
|
import os
import sys
import pandas as pd
import seaborn as sns
import jmetal.analysis.plot as jPlot
from jmetal.algorithm.singleobjective.differential_evolution import DifferentialEvolutionReset
from jmetal.algorithm.singleobjective.simulated_annealing import SimulatedAnnealing
from jmetal.operator import PolynomialMutation
from jmetal.operator.boundary_correction import RandomCorrection
from jmetal.operator.crossover import DifferentialEvolutionCrossover
from jmetal.operator.selection import DifferentialEvolutionBestMinSelection, DifferentialEvolutionSelection
from jmetal.problem.singleobjective.CEC2013_pygmo import CEC2013
from jmetal.util.observer import MechanismProbabilitiesObserver
from jmetal.util.observer import PrintObjectivesObserver, DimensionWiseDiversityObserver, PhenotypicDiversityObserver
from jmetal.util.solutions.generator import ArchiveInjector
from jmetal.util.termination_criterion import StoppingByEvaluations
def create_data_frames(objective_dict, gdm_dict, xpl_dict, xpt_dict, solution_dict):
df_gdm = pd.DataFrame.from_dict(gdm_dict, orient='index')
df_gdm = df_gdm.transpose()
df_xpl = pd.DataFrame.from_dict(xpl_dict, orient='index')
df_xpl = df_xpl.transpose()
df_xpt = pd.DataFrame.from_dict(xpt_dict, orient='index')
df_xpt = df_xpt.transpose()
df_objective = pd.DataFrame.from_dict(objective_dict, orient='index')
df_objective = df_objective.transpose()
df_solutions = pd.DataFrame.from_dict(solution_dict, orient='index')
df_solutions = df_solutions.transpose()
return df_objective, df_gdm, df_xpt, df_xpl, df_solutions
def run_algorithm(trials, algorithm):
prob_observer = MechanismProbabilitiesObserver(termination_criteria.get_criterion())
obj_observer = PrintObjectivesObserver(frequency=algorithm.population_size)
dwd_observer = DimensionWiseDiversityObserver(termination_criteria.get_criterion())
ph_observer = PhenotypicDiversityObserver(termination_criteria.get_criterion())
algorithm.observable.register(prob_observer)
algorithm.observable.register(obj_observer)
algorithm.observable.register(dwd_observer)
algorithm.observable.register(ph_observer)
diversity = {}
energy = {}
dwd_xpl = {}
dwd_xpt = {}
final_solution = {}
explore = {}
exploit = {}
do_nothing = {}
diversification_counter = {}
intensification_counter = {}
for i in range(trials):
prob_observer.__init__(termination_criteria.get_criterion())
obj_observer.__init__(frequency=algorithm.population_size)
dwd_observer.__init__(termination_criteria.get_criterion())
ph_observer.__init__(termination_criteria.get_criterion())
#file_path = '/home/pnarloch/workspace/resources/populations/CEC2013/function_' + str(p) + '/population_' \
# + str(i) + '.txt'
# population_generator = ArchiveInjector(file_path=file_path, problem=problem)
# algorithm.population_generator = population_generator
algorithm.run()
idx = 'run-' + str(i + 1)
energy[idx] = obj_observer.fitness_history
dwd_xpl[idx] = dwd_observer.xpl_history
dwd_xpt[idx] = dwd_observer.xpt_history
final_solution[idx] = algorithm.get_best_solution().variables
diversification_counter[idx] = algorithm.diversification_count
intensification_counter[idx] = algorithm.intensification_count
explore[idx] = prob_observer.diversfication_history
exploit[idx] = prob_observer.intensification_history
do_nothing[idx] = prob_observer.do_nothing_history
return energy, diversity, dwd_xpl, dwd_xpt, final_solution, explore, exploit, do_nothing, \
diversification_counter, intensification_counter
if __name__ == '__main__':
sns.set(style="darkgrid")
p = int(sys.argv[1])
d = 10
problem = CEC2013(p, d)
trials = 10
max_evaluations = d * 10000
pop_size = 100
termination_criteria = StoppingByEvaluations(max_evaluations)
selection_operator = DifferentialEvolutionSelection()
best_selection_operator = DifferentialEvolutionBestMinSelection()
crossover_operator = DifferentialEvolutionCrossover(0.9, 0.5, 0.0)
local_search_algorithm = SimulatedAnnealing(
problem=problem,
mutation=PolynomialMutation(probability=0.1, distribution_index=20.0),
termination_criterion=StoppingByEvaluations(max=500)
)
algorithm = DifferentialEvolutionReset(problem=problem,
population_size=pop_size,
termination_criterion=termination_criteria)
algorithm.selection_operator = selection_operator
algorithm.crossover_operator = crossover_operator
algorithm.crossover_operator.boundary_correction = RandomCorrection()
# Random Trials
fitness_indexes, diversity_indexes, xpl, xpt, solutions, explore, exploit, do_nothing, \
diversification_counter, intensification_counter = run_algorithm(trials,
algorithm)
df_fitness, df_gdm, df_xpt, df_xpl, df_solutions = create_data_frames(fitness_indexes,
diversity_indexes,
xpl_dict=xpl,
xpt_dict=xpt,
solution_dict=solutions)
df_explore = pd.DataFrame.from_dict(explore, orient="index")
df_explore = df_explore.transpose()
df_exploit = pd.DataFrame.from_dict(exploit, orient="index")
df_exploit = df_exploit.transpose()
df_do_nothing = pd.DataFrame.from_dict(do_nothing, orient="index")
df_do_nothing = df_do_nothing.transpose()
df_intensification = pd.DataFrame.from_dict(intensification_counter, orient='index')
df_intensification = df_intensification.transpose()
df_diversification = pd.DataFrame.from_dict(diversification_counter, orient='index')
df_diversification = df_diversification.diversification.transpose()
result_path = '/home/pnarloch/workspace/CEC2014/f_' + str(p) + '/'
if not os.path.exists(result_path):
os.mkdir(result_path)
plot_file = result_path + problem.get_name() + '_D' + str(d) + '_' \
+ algorithm.selection_operator.get_name() + '_' + algorithm.get_name() + "_SA"
df_fitness.to_csv(plot_file + '_fitness.csv')
df_xpt.to_csv(plot_file + '_xpt.csv')
df_xpl.to_csv(plot_file + '_xpl.csv')
df_gdm.to_csv(plot_file + '_gdm.csv')
df_solutions.to_csv(plot_file + '_solutions.csv')
df_explore.to_csv(plot_file + '_explore.csv')
df_exploit.to_csv(plot_file + '_exploit.csv')
df_do_nothing.to_csv(plot_file + '_nothing.csv')
df_diversification.to_csv(plot_file + '_diversification.csv')
df_diversification.intensification.to_csv(plot_file + '_intensification.csv')
''' Plotting Energies + Mean '''
jMetalPlot = jPlot.LineChart(df_fitness, "Fitness " + problem.get_name(), "Generations",
"Fitness", plot_file + "_fitness.png")
jMetalPlot.plot()
jMetalPlot.save_plot()
''' Plotting XPL/XPT/GDM for each run '''
for col in df_gdm:
_df = pd.DataFrame()
_df['GDM'] = df_gdm[col]
_df['XPL'] = df_xpl[col]
_df['XPT'] = df_xpt[col]
jMetalPlot.__init__(_df, "Diversity " + problem.get_name() + ' - ' + col, 'Generations',
'Diversity Index', plot_file + '_diversity_' + col + '.png')
jMetalPlot.plot()
jMetalPlot.save_plot()
for col in df_explore:
_df = pd.DataFrame()
_df['Explore'] = df_explore[col]
_df['Exploit'] = df_exploit[col]
_df['Nothing'] = df_do_nothing[col]
jMetalPlot.__init__(_df, "Strategy Probabilities " + problem.get_name() + ' - ' + col, 'Generation',
'Probabilities', plot_file + '_strategy_probability_' + col + '.png')
jMetalPlot.plot()
jMetalPlot.save_plot()
|
en
| 0.478925
|
#file_path = '/home/pnarloch/workspace/resources/populations/CEC2013/function_' + str(p) + '/population_' \ # + str(i) + '.txt' # population_generator = ArchiveInjector(file_path=file_path, problem=problem) # algorithm.population_generator = population_generator # Random Trials Plotting Energies + Mean Plotting XPL/XPT/GDM for each run
| 1.964639
| 2
|
app/admin/__init__.py
|
wanguinjoka/personal-blog
| 0
|
6625633
|
from flask import Blueprint
# Blueprint grouping the admin section's routes; presumably registered on
# the app by an application factory elsewhere -- confirm registration site.
admin = Blueprint('admin',__name__)
# Imported after the Blueprint for their side effect of attaching the
# admin views/forms to it (the usual Flask ordering for such imports).
from . import views,forms
|
from flask import Blueprint
admin = Blueprint('admin',__name__)
from . import views,forms
|
none
| 1
| 1.351743
| 1
|
|
main/filesAndForms/migrations/0003_auto_20150322_1638.py
|
nynguyen/sprint_zero_demo
| 0
|
6625634
|
<filename>main/filesAndForms/migrations/0003_auto_20150322_1638.py<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration: adds `name` and `privacy` to InputFile."""
    dependencies = [
        ('filesAndForms', '0002_inputfile_next_one'),
    ]
    operations = [
        migrations.AddField(
            model_name='inputfile',
            name='name',
            # NOTE(review): the one-off default backfilling this CharField is
            # a datetime (the value typed at the makemigrations prompt), so
            # existing rows get a timestamp string as their name. Confirm
            # this placeholder is acceptable before further migrations.
            field=models.CharField(default=datetime.datetime(2015, 3, 22, 20, 38, 56, 580815, tzinfo=utc), max_length=100),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='inputfile',
            name='privacy',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
    ]
|
<filename>main/filesAndForms/migrations/0003_auto_20150322_1638.py<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('filesAndForms', '0002_inputfile_next_one'),
]
operations = [
migrations.AddField(
model_name='inputfile',
name='name',
field=models.CharField(default=datetime.datetime(2015, 3, 22, 20, 38, 56, 580815, tzinfo=utc), max_length=100),
preserve_default=False,
),
migrations.AddField(
model_name='inputfile',
name='privacy',
field=models.BooleanField(default=False),
preserve_default=True,
),
]
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 1.607194
| 2
|
add-donations.py
|
vis4/parteispenden
| 2
|
6625635
|
#!/usr/bin/env python
# Python 2 script (print statements, csv reader .next()): imports donation
# records from a tab-separated CSV into the `parteispenden10` MySQL database.
import mysql.connector
import csv
import sys
new_donations_csv = csv.reader(open(sys.argv[1]), dialect='excel-tab')
# expected format: Name, Typ, Strasse, Plz, Stadt, Partei, Betrag
# NOTE(review): the `expected` list below also requires a Jahr column,
# which the format comment above omits.
head = new_donations_csv.next()
expected = 'name,typ,strasse,plz,stadt,partei,betrag,jahr'.split(',')
# Map each required column name to its index in the header row
# (case-insensitive); abort if any required column is missing.
cols = {}
for col in expected:
    for i in range(len(head)):
        if col == head[i].lower():
            cols[col] = i
            break
    if col not in cols:
        print "Error: column '" + col + "' not found in csv file.\n"
        exit(-1)
# Parse data rows into dicts, coercing amount and year to numbers.
new_donations = []
for row in new_donations_csv:
    if len(row) > 1:
        donation = {}
        for col in cols:
            donation[col] = row[cols[col]]
        donation['betrag'] = float(donation['betrag'])
        donation['jahr'] = int(donation['jahr'])
        new_donations.append(donation)
print len(new_donations), 'new donations'
# connect to mysql database
conn = mysql.connector.connect()
conn.connect(database='parteispenden10', user='root')
cur = conn.cursor()
def add_donation(donation, donor_id, donor_rev):
    """Insert one donation row linked to the given donor id/revision.

    Uses the module-level connection ``conn``; commits immediately.
    NOTE(review): ``donation['partei']`` is written to the ``partei_id``
    column -- presumably it already holds a party id; confirm upstream.
    """
    cur = conn.cursor()
    values = (donor_id, donor_rev, donation['partei'], donation['jahr'], donation['betrag'])
    cur.execute('INSERT INTO spenden (spender_id, spender_rev, partei_id, jahr, betrag_euro) VALUES (%s, %s, %s, %s, %s)', values)
    conn.commit()
def add_donor(donor):
    """Insert a new donor row and return its autogenerated primary key.

    Uses the module-level connection ``conn``; commits immediately.
    """
    cur = conn.cursor()
    values = (donor['name'], donor['strasse'], donor['plz'], donor['stadt'], donor['typ'])
    cur.execute('INSERT INTO spender (name, strasse, plz, stadt, typ) VALUES (%s, %s, %s, %s, %s)', values)
    conn.commit()
    # MySQL Connector/Python exposes the AUTO_INCREMENT id of the last
    # INSERT on this cursor as `lastrowid`, which avoids the extra
    # 'SELECT LAST_INSERT_ID()' round trip the original code made.
    return cur.lastrowid
# For each parsed donation: look the donor up by (name, city), create them
# if unknown, then insert the donation row linked to that donor.
for donation in new_donations:
    q = (donation['name'], donation['stadt'])
    cur.execute('SELECT id, revision FROM spender WHERE name = %s and stadt = %s LIMIT 1', q)
    res = cur.fetchone()
    if res is None:
        # Unknown donor: create them; new donors start at revision 0.
        donor_id = add_donor(donation)
        donor_rev = 0
    else:
        donor_id, donor_rev = res
    add_donation(donation, donor_id, donor_rev)
exit()
# NOTE(review): everything below this exit() is unreachable dead code --
# apparently an abandoned token-frequency experiment. Consider deleting.
token_count = {}
for donation in new_donations:
    tokens = donation['name'].lower().split(' ')
    for token in tokens:
        if token not in token_count:
            token_count[token] = 0
        token_count[token] += 1
# read known donor tokens
cur.execute('SELECT spender_id, token FROM spender_token')
# NOTE(review): known_token is declared but never appended to in the loop.
known_token = []
for spender_id, token in cur:
    token = token.lower()
    if token not in token_count:
        token_count[token] = 0
    token_count[token] += 1
# Rank tokens by ascending frequency.
tmp = []
for token in token_count:
    tmp.append((token, token_count[token]))
tmp = sorted(tmp, key=lambda r: r[1])
|
#!/usr/bin/env python
import mysql.connector
import csv
import sys
new_donations_csv = csv.reader(open(sys.argv[1]), dialect='excel-tab')
# expected format: Name, Typ, Strasse, Plz, Stadt, Partei, Betrag
head = new_donations_csv.next()
expected = 'name,typ,strasse,plz,stadt,partei,betrag,jahr'.split(',')
cols = {}
for col in expected:
for i in range(len(head)):
if col == head[i].lower():
cols[col] = i
break
if col not in cols:
print "Error: column '" + col + "' not found in csv file.\n"
exit(-1)
new_donations = []
for row in new_donations_csv:
if len(row) > 1:
donation = {}
for col in cols:
donation[col] = row[cols[col]]
donation['betrag'] = float(donation['betrag'])
donation['jahr'] = int(donation['jahr'])
new_donations.append(donation)
print len(new_donations), 'new donations'
# connect to mysql database
conn = mysql.connector.connect()
conn.connect(database='parteispenden10', user='root')
cur = conn.cursor()
def add_donation(donation, donor_id, donor_rev):
cur = conn.cursor()
values = (donor_id, donor_rev, donation['partei'], donation['jahr'], donation['betrag'])
cur.execute('INSERT INTO spenden (spender_id, spender_rev, partei_id, jahr, betrag_euro) VALUES (%s, %s, %s, %s, %s)', values)
conn.commit()
def add_donor(donor):
cur = conn.cursor()
values = (donor['name'], donor['strasse'], donor['plz'], donor['stadt'], donor['typ'])
cur.execute('INSERT INTO spender (name, strasse, plz, stadt, typ) VALUES (%s, %s, %s, %s, %s)', values)
conn.commit()
cur.execute('SELECT LAST_INSERT_ID();')
last_id = cur.fetchone()[0]
return last_id
for donation in new_donations:
q = (donation['name'], donation['stadt'])
cur.execute('SELECT id, revision FROM spender WHERE name = %s and stadt = %s LIMIT 1', q)
res = cur.fetchone()
if res is None:
donor_id = add_donor(donation)
donor_rev = 0
else:
donor_id, donor_rev = res
add_donation(donation, donor_id, donor_rev)
exit()
token_count = {}
for donation in new_donations:
tokens = donation['name'].lower().split(' ')
for token in tokens:
if token not in token_count:
token_count[token] = 0
token_count[token] += 1
# read known donor tokens
cur.execute('SELECT spender_id, token FROM spender_token')
known_token = []
for spender_id, token in cur:
token = token.lower()
if token not in token_count:
token_count[token] = 0
token_count[token] += 1
tmp = []
for token in token_count:
tmp.append((token, token_count[token]))
tmp = sorted(tmp, key=lambda r: r[1])
|
en
| 0.40934
|
#!/usr/bin/env python # expected format: Name, Typ, Strasse, Plz, Stadt, Partei, Betrag # connect to mysql database # read known donor tokens
| 2.953382
| 3
|
tests/test_goal.py
|
axonepro/sdk-ooti
| 1
|
6625636
|
from factories.factories import ProjectFactory, TeamFactory
import unittest
# To read .env variables
import os
import sys
from dotenv import load_dotenv
# Path hack: put the parent directory on sys.path so `resources` (which
# lives one level up from tests/) can be imported without installation.
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from resources import ooti # noqa E402
# Loading environment variables (stored in .env file)
load_dotenv()
OOTI_AUTH = os.getenv("OOTI_AUTH")
OOTI_PASSWORD = os.getenv("OOTI_PASSWORD")
# NOTE(review): connecting at import time means merely importing this test
# module performs network authentication against the live OOTI service.
sdk = ooti.OotiAPI(OOTI_AUTH, OOTI_PASSWORD)
sdk.connect()
class TestGoals(unittest.TestCase):
    """Integration tests for the Goals API (talks to the live OOTI service)."""
    @ classmethod
    def setUp(cls):
        # NOTE(review): decorated @classmethod but named setUp, so it still
        # runs before *every* test (unittest calls self.setUp(), which the
        # classmethod descriptor binds to the class) and rebinds team_pk as
        # a class attribute each time. If once-per-class setup was the
        # intent, this should be setUpClass -- confirm before changing.
        cls.team_pk = TeamFactory()
    def test_get_goals_list(self):
        # Listing goals should succeed for an authenticated session.
        response = sdk.Goals.get_goals_list()
        self.assertEqual(response['status'], 200)
    def test_create_goal(self):
        """Full lifecycle: create, update, fetch, then delete a goal."""
        payload = {
            'team': self.team_pk,
            'name': 'goal test',
            'value': 2,
            'year': 2021
        }
        response = sdk.Goals.create_goal(payload)
        self.assertEqual(response['status'], 201)
        payload = {
            'team': self.team_pk,
            'name': 'goal updated',
            'value': 5,
            'year': 2020
        }
        update = sdk.Goals.update_goal_details(response['data']['id'], payload)
        self.assertEqual(update['status'], 200)
        get = sdk.Goals.get_goal_details(response['data']['id'])
        self.assertEqual(get['status'], 200)
        delete = sdk.Goals.delete_goal(response['data']['id'])
        self.assertEqual(delete['status'], 204)
if __name__ == '__main__':
    unittest.main()
|
from factories.factories import ProjectFactory, TeamFactory
import unittest
# To read .env variables
import os
import sys
from dotenv import load_dotenv
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from resources import ooti # noqa E402
# Loading environment variables (stored in .env file)
load_dotenv()
OOTI_AUTH = os.getenv("OOTI_AUTH")
OOTI_PASSWORD = os.getenv("OOTI_PASSWORD")
sdk = ooti.OotiAPI(OOTI_AUTH, OOTI_PASSWORD)
sdk.connect()
class TestGoals(unittest.TestCase):
@ classmethod
def setUp(cls):
cls.team_pk = TeamFactory()
def test_get_goals_list(self):
response = sdk.Goals.get_goals_list()
self.assertEqual(response['status'], 200)
def test_create_goal(self):
payload = {
'team': self.team_pk,
'name': 'goal test',
'value': 2,
'year': 2021
}
response = sdk.Goals.create_goal(payload)
self.assertEqual(response['status'], 201)
payload = {
'team': self.team_pk,
'name': 'goal updated',
'value': 5,
'year': 2020
}
update = sdk.Goals.update_goal_details(response['data']['id'], payload)
self.assertEqual(update['status'], 200)
get = sdk.Goals.get_goal_details(response['data']['id'])
self.assertEqual(get['status'], 200)
delete = sdk.Goals.delete_goal(response['data']['id'])
self.assertEqual(delete['status'], 204)
if __name__ == '__main__':
unittest.main()
|
en
| 0.466166
|
# To read .env variables # noqa E402 # Loading environment variables (stored in .env file)
| 2.567573
| 3
|
openbadges/verifier/tasks/crypto.py
|
iankpconcentricsky/badgecheck
| 9
|
6625637
|
<gh_stars>1-10
from Crypto.PublicKey import RSA
from jose import jwk, jws, exceptions as joseexceptions
import json
import six
from ..actions.graph import patch_node
from ..actions.tasks import add_task
from ..actions.validation_report import set_validation_subject
from ..exceptions import TaskPrerequisitesError
from ..state import get_node_by_id, get_node_by_path
from ..utils import list_of, make_string_from_bytes
from .utils import task_result
from .task_types import (ISSUER_PROPERTY_DEPENDENCIES, INTAKE_JSON, SIGNING_KEY_FETCHED, VERIFY_JWS,
VERIFY_KEY_OWNERSHIP, VALIDATE_PROPERTY, VALIDATE_REVOCATIONLIST_ENTRIES,
VERIFY_SIGNED_ASSERTION_NOT_REVOKED)
from .validation import OBClasses, ValueTypes
def process_jws_input(state, task_meta, **options):
    """Decode the claims of a JWS-signed input and queue follow-up tasks.

    Queues an INTAKE_JSON task for the decoded claims plus a VERIFY_JWS task
    (gated on SIGNING_KEY_FETCHED) for the raw signed data, and marks the
    decoded node as the validation subject when an id is known.
    """
    if 'data' not in task_meta:
        raise TaskPrerequisitesError()
    signed_data = task_meta['data']

    claims_json = jws.get_unverified_claims(signed_data).decode('utf-8')
    claims = json.loads(claims_json)
    node_id = task_meta.get('node_id', claims.get('id'))
    depth = task_meta.get('depth')

    queued = [
        add_task(INTAKE_JSON, data=claims_json, node_id=node_id, depth=depth),
        add_task(VERIFY_JWS, node_id=node_id, data=signed_data,
                 prerequisites=SIGNING_KEY_FETCHED, depth=depth),
    ]
    if node_id:
        queued.append(set_validation_subject(node_id))
    return task_result(
        True, "Processed JWS-signed data and queued signature verification task", queued)
def verify_jws_signature(state, task_meta, **options):
    """Verify the JWS signature on *data* against the creator's RSA key.

    Regardless of outcome, queues key-ownership verification plus optional
    revocation-list validation for the issuer.
    """
    try:
        data = task_meta['data']
        node_id = task_meta['node_id']
        key_node = get_node_by_path(state, [node_id, 'verification', 'creator'])
        public_pem = key_node['publicKeyPem']
        depth = task_meta['depth']
    except (KeyError, IndexError,):
        raise TaskPrerequisitesError()

    followup = [
        add_task(VERIFY_KEY_OWNERSHIP, node_id=node_id, depth=depth),
        add_task(
            VALIDATE_PROPERTY, node_path=[node_id, 'badge', 'issuer'], prop_name='revocationList',
            prop_type=ValueTypes.ID, expected_class=OBClasses.RevocationList, fetch=True, required=False,
            prerequisites=[ISSUER_PROPERTY_DEPENDENCIES], depth=depth
        ),
    ]

    # Build a JWK dict from the issuer-declared PEM key and check the signature.
    rsa_key = RSA.import_key(public_pem)
    jwk_dict = jwk.construct(rsa_key, 'RS256').to_dict()
    try:
        jws.verify(data, jwk_dict, None)
    except (joseexceptions.JWSError, joseexceptions.JWSSignatureError,) as e:
        failure_msg = "Signature for node {} failed verification".format(node_id) + " :: " + str(e)
        return task_result(False, failure_msg, followup)

    return task_result(
        True, "Signature for node {} passed verification".format(node_id), followup)
def verify_key_ownership(state, task_meta, **options):
    """Confirm the assertion's signing key is declared in the issuer profile."""
    try:
        node_id = task_meta['node_id']
        issuer_node = get_node_by_path(state, [node_id, 'badge', 'issuer'])
        key_node = get_node_by_path(state, [node_id, 'verification', 'creator'])
        key_id = key_node['id']
        depth = task_meta['depth']
    except (KeyError, IndexError,):
        raise TaskPrerequisitesError()

    actions = []
    # When the issuer declares a revocation list, also queue a check that this
    # signed assertion is not on it (after the list entries are validated).
    if issuer_node.get('revocationList'):
        actions.append(add_task(
            VERIFY_SIGNED_ASSERTION_NOT_REVOKED,
            node_id=node_id,
            prerequisites=[VALIDATE_REVOCATIONLIST_ENTRIES],
            depth=depth
        ))

    authorized_keys = list_of(issuer_node.get('publicKey'))
    if key_id in authorized_keys:
        return task_result(
            True, "Assertion signing key {} is properly declared in issuer profile".format(key_id), actions)
    return task_result(
        False,
        "Assertion signed by a key {} other than those authorized by issuer profile".format(key_id),
        actions)
def verify_signed_assertion_not_revoked(state, task_meta, **options):
    """Check a signed assertion against its issuer's RevocationList.

    Succeeds when the issuer has no revocation list or the assertion is not
    listed; otherwise fails, including the revocation reason when an entry
    carries one. Also patches the list node down to the matching entries.

    Fix: removed the unused ``assertion_records`` comprehension that scanned
    ``state['graph']`` without its result ever being used.
    """
    try:
        assertion_id = task_meta['node_id']
        issuer = get_node_by_path(state, [assertion_id, 'badge', 'issuer'])
    except (IndexError, KeyError, TypeError,):
        raise TaskPrerequisitesError()

    if not issuer.get('revocationList'):
        return task_result(True, 'Assertion {} is not revoked. Issuer {} has no revocation list'.format(
            assertion_id, issuer.get('id')
        ))

    revocation_list = get_node_by_id(state, issuer['revocationList'])
    revoked_assertions = revocation_list['revokedAssertions']

    def _is_match(term, container):
        # Entries may be bare id strings or embedded objects carrying an 'id'.
        if isinstance(container, six.string_types):
            return term == container
        return container.get('id') == term

    revoked_match = [a for a in revoked_assertions if _is_match(assertion_id, a)]
    # Keep only the entries relevant to this assertion on the graph node.
    actions = [patch_node(revocation_list['id'], {'revokedAssertions': revoked_match})]

    if revoked_match:
        # Report a revocation reason when any matching entry provides one
        # (the last entry with a reason wins, matching prior behavior).
        msg = ''
        for entry in revoked_match:
            try:
                msg = ' with reason: ' + entry['revocationReason']
            except (KeyError, TypeError,):
                continue
        return task_result(False, "Assertion {} has been revoked in RevocationList {}{}".format(
            assertion_id, issuer['revocationList'], msg
        ), actions)
    return task_result(True, "Assertion {} is not marked as revoked in RevocationList {}".format(
        assertion_id, issuer['revocationList']
    ), actions)
|
from Crypto.PublicKey import RSA
from jose import jwk, jws, exceptions as joseexceptions
import json
import six
from ..actions.graph import patch_node
from ..actions.tasks import add_task
from ..actions.validation_report import set_validation_subject
from ..exceptions import TaskPrerequisitesError
from ..state import get_node_by_id, get_node_by_path
from ..utils import list_of, make_string_from_bytes
from .utils import task_result
from .task_types import (ISSUER_PROPERTY_DEPENDENCIES, INTAKE_JSON, SIGNING_KEY_FETCHED, VERIFY_JWS,
VERIFY_KEY_OWNERSHIP, VALIDATE_PROPERTY, VALIDATE_REVOCATIONLIST_ENTRIES,
VERIFY_SIGNED_ASSERTION_NOT_REVOKED)
from .validation import OBClasses, ValueTypes
def process_jws_input(state, task_meta, **options):
    """Decode the claims of a JWS-signed input and queue follow-up tasks.

    Queues an INTAKE_JSON task for the decoded claims plus a VERIFY_JWS task
    (gated on SIGNING_KEY_FETCHED) for the raw signed data, and marks the
    decoded node as the validation subject when an id is known.
    """
    if 'data' not in task_meta:
        raise TaskPrerequisitesError()
    signed_data = task_meta['data']

    claims_json = jws.get_unverified_claims(signed_data).decode('utf-8')
    claims = json.loads(claims_json)
    node_id = task_meta.get('node_id', claims.get('id'))
    depth = task_meta.get('depth')

    queued = [
        add_task(INTAKE_JSON, data=claims_json, node_id=node_id, depth=depth),
        add_task(VERIFY_JWS, node_id=node_id, data=signed_data,
                 prerequisites=SIGNING_KEY_FETCHED, depth=depth),
    ]
    if node_id:
        queued.append(set_validation_subject(node_id))
    return task_result(
        True, "Processed JWS-signed data and queued signature verification task", queued)
def verify_jws_signature(state, task_meta, **options):
    """Verify the JWS signature on *data* against the creator's RSA key.

    Regardless of outcome, queues key-ownership verification plus optional
    revocation-list validation for the issuer.
    """
    try:
        data = task_meta['data']
        node_id = task_meta['node_id']
        key_node = get_node_by_path(state, [node_id, 'verification', 'creator'])
        public_pem = key_node['publicKeyPem']
        depth = task_meta['depth']
    except (KeyError, IndexError,):
        raise TaskPrerequisitesError()

    followup = [
        add_task(VERIFY_KEY_OWNERSHIP, node_id=node_id, depth=depth),
        add_task(
            VALIDATE_PROPERTY, node_path=[node_id, 'badge', 'issuer'], prop_name='revocationList',
            prop_type=ValueTypes.ID, expected_class=OBClasses.RevocationList, fetch=True, required=False,
            prerequisites=[ISSUER_PROPERTY_DEPENDENCIES], depth=depth
        ),
    ]

    # Build a JWK dict from the issuer-declared PEM key and check the signature.
    rsa_key = RSA.import_key(public_pem)
    jwk_dict = jwk.construct(rsa_key, 'RS256').to_dict()
    try:
        jws.verify(data, jwk_dict, None)
    except (joseexceptions.JWSError, joseexceptions.JWSSignatureError,) as e:
        failure_msg = "Signature for node {} failed verification".format(node_id) + " :: " + str(e)
        return task_result(False, failure_msg, followup)

    return task_result(
        True, "Signature for node {} passed verification".format(node_id), followup)
def verify_key_ownership(state, task_meta, **options):
    """Confirm the assertion's signing key is declared in the issuer profile."""
    try:
        node_id = task_meta['node_id']
        issuer_node = get_node_by_path(state, [node_id, 'badge', 'issuer'])
        key_node = get_node_by_path(state, [node_id, 'verification', 'creator'])
        key_id = key_node['id']
        depth = task_meta['depth']
    except (KeyError, IndexError,):
        raise TaskPrerequisitesError()

    actions = []
    # When the issuer declares a revocation list, also queue a check that this
    # signed assertion is not on it (after the list entries are validated).
    if issuer_node.get('revocationList'):
        actions.append(add_task(
            VERIFY_SIGNED_ASSERTION_NOT_REVOKED,
            node_id=node_id,
            prerequisites=[VALIDATE_REVOCATIONLIST_ENTRIES],
            depth=depth
        ))

    authorized_keys = list_of(issuer_node.get('publicKey'))
    if key_id in authorized_keys:
        return task_result(
            True, "Assertion signing key {} is properly declared in issuer profile".format(key_id), actions)
    return task_result(
        False,
        "Assertion signed by a key {} other than those authorized by issuer profile".format(key_id),
        actions)
def verify_signed_assertion_not_revoked(state, task_meta, **options):
    """Check a signed assertion against its issuer's RevocationList.

    Succeeds when the issuer has no revocation list or the assertion is not
    listed; otherwise fails, including the revocation reason when an entry
    carries one. Also patches the list node down to the matching entries.

    Fix: removed the unused ``assertion_records`` comprehension that scanned
    ``state['graph']`` without its result ever being used.
    """
    try:
        assertion_id = task_meta['node_id']
        issuer = get_node_by_path(state, [assertion_id, 'badge', 'issuer'])
    except (IndexError, KeyError, TypeError,):
        raise TaskPrerequisitesError()

    if not issuer.get('revocationList'):
        return task_result(True, 'Assertion {} is not revoked. Issuer {} has no revocation list'.format(
            assertion_id, issuer.get('id')
        ))

    revocation_list = get_node_by_id(state, issuer['revocationList'])
    revoked_assertions = revocation_list['revokedAssertions']

    def _is_match(term, container):
        # Entries may be bare id strings or embedded objects carrying an 'id'.
        if isinstance(container, six.string_types):
            return term == container
        return container.get('id') == term

    revoked_match = [a for a in revoked_assertions if _is_match(assertion_id, a)]
    # Keep only the entries relevant to this assertion on the graph node.
    actions = [patch_node(revocation_list['id'], {'revokedAssertions': revoked_match})]

    if revoked_match:
        # Report a revocation reason when any matching entry provides one
        # (the last entry with a reason wins, matching prior behavior).
        msg = ''
        for entry in revoked_match:
            try:
                msg = ' with reason: ' + entry['revocationReason']
            except (KeyError, TypeError,):
                continue
        return task_result(False, "Assertion {} has been revoked in RevocationList {}{}".format(
            assertion_id, issuer['revocationList'], msg
        ), actions)
    return task_result(True, "Assertion {} is not marked as revoked in RevocationList {}".format(
        assertion_id, issuer['revocationList']
    ), actions)
|
none
| 1
| 1.873344
| 2
|
|
src/test/resources/loaderTestData/dependencygraph/pythonproject/module/package1/sub_package1/module1.py
|
jacksonpradolima/comfort
| 2
|
6625638
|
<reponame>jacksonpradolima/comfort<gh_stars>1-10
class Module1(object):
    """Empty placeholder class (fixture data for dependency-graph loader tests)."""
    pass
|
class Module1(object):
    """Empty placeholder class (fixture data for dependency-graph loader tests)."""
    pass
|
none
| 1
| 1.063144
| 1
|
|
test/conftest.py
|
grauwoelfchen/pyramid_secure_response
| 2
|
6625639
|
<reponame>grauwoelfchen/pyramid_secure_response
import pytest
@pytest.fixture(scope='function')
def dummy_request():  # type: () -> Request
    """Build a fresh pyramid DummyRequest for http://example.org per test."""
    from pyramid import testing

    base_url = 'http://example.org'
    shared_settings = {}
    request = testing.DummyRequest(
        environ={},
        locale_name='en',
        matched_route=None,
        settings=shared_settings,
        server_name='example.org',
        subdomain='',
        host='example.org:80',
        application_url=base_url,
        url=base_url,
        host_url=base_url,
        path_url=base_url,
    )
    # The same dict is exposed via the registry so tests can mutate settings
    # through either handle.
    request.registry.settings = shared_settings
    return request
|
import pytest
@pytest.fixture(scope='function')
def dummy_request():  # type: () -> Request
    """Build a fresh pyramid DummyRequest for http://example.org per test."""
    from pyramid import testing

    base_url = 'http://example.org'
    shared_settings = {}
    request = testing.DummyRequest(
        environ={},
        locale_name='en',
        matched_route=None,
        settings=shared_settings,
        server_name='example.org',
        subdomain='',
        host='example.org:80',
        application_url=base_url,
        url=base_url,
        host_url=base_url,
        path_url=base_url,
    )
    # The same dict is exposed via the registry so tests can mutate settings
    # through either handle.
    request.registry.settings = shared_settings
    return request
|
en
| 0.546972
|
# type: () -> Request
| 2.133657
| 2
|
pcg_gazebo/generators/biomes/whittaker_biome.py
|
TForce1/pcg_gazebo
| 40
|
6625640
|
# Copyright (c) 2020 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .biome import Biome
class WhittakerBiome(Biome):
    """Biome table over 6 moisture zones x 4 elevation zones, with an RGB
    display color per biome (Whittaker-style diagram)."""

    def __init__(self):
        super(WhittakerBiome, self).__init__(
            n_moisture_zones=6,
            n_elevation_zones=4)
        # Display color for every biome used below.
        colors = dict(
            snow=[248, 248, 248],
            tundra=[221, 221, 187],
            bare=[187, 187, 187],
            scorched=[153, 153, 153],
            taiga=[204, 212, 187],
            shrubland=[196, 204, 187],
            temperate_desert=[228, 232, 202],
            temperate_rain_forest=[164, 196, 168],
            temperate_deciduous_forest=[180, 201, 169],
            grassland=[196, 212, 170],
            tropical_rain_forest=[156, 187, 169],
            tropical_seasonal_forest=[169, 204, 164],
            subtropical_desert=[233, 221, 199]
        )
        for name, rgb in colors.items():
            self.add_biome(name, color=rgb)

        # Row = elevation zone 0..3; column = moisture zone 0..5.
        diagram = [
            ['subtropical_desert', 'grassland', 'tropical_seasonal_forest',
             'tropical_seasonal_forest', 'tropical_rain_forest', 'tropical_rain_forest'],
            ['temperate_desert', 'grassland', 'grassland',
             'temperate_deciduous_forest', 'temperate_deciduous_forest', 'temperate_rain_forest'],
            ['temperate_desert', 'temperate_desert', 'shrubland',
             'shrubland', 'taiga', 'taiga'],
            ['scorched', 'bare', 'tundra', 'snow', 'snow', 'snow'],
        ]
        for elevation, row in enumerate(diagram):
            for moisture, biome_name in enumerate(row):
                self.add_rule(biome_name, moisture, elevation)

        # Minimum heights and fade distances for elevation zones 1-3.
        self.set_min_height(100.0, 3)
        self.set_fade_dist(1, 3)
        self.set_min_height(80.0, 2)
        self.set_fade_dist(1, 2)
        self.set_min_height(10.0, 1)
        self.set_fade_dist(1, 1)
|
# Copyright (c) 2020 - The Procedural Generation for Gazebo authors
# For information on the respective copyright owner see the NOTICE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .biome import Biome
class WhittakerBiome(Biome):
    """Biome table over 6 moisture zones x 4 elevation zones, with an RGB
    display color per biome (Whittaker-style diagram)."""

    def __init__(self):
        super(WhittakerBiome, self).__init__(
            n_moisture_zones=6,
            n_elevation_zones=4)
        # Display color for every biome used below.
        colors = dict(
            snow=[248, 248, 248],
            tundra=[221, 221, 187],
            bare=[187, 187, 187],
            scorched=[153, 153, 153],
            taiga=[204, 212, 187],
            shrubland=[196, 204, 187],
            temperate_desert=[228, 232, 202],
            temperate_rain_forest=[164, 196, 168],
            temperate_deciduous_forest=[180, 201, 169],
            grassland=[196, 212, 170],
            tropical_rain_forest=[156, 187, 169],
            tropical_seasonal_forest=[169, 204, 164],
            subtropical_desert=[233, 221, 199]
        )
        for name, rgb in colors.items():
            self.add_biome(name, color=rgb)

        # Row = elevation zone 0..3; column = moisture zone 0..5.
        diagram = [
            ['subtropical_desert', 'grassland', 'tropical_seasonal_forest',
             'tropical_seasonal_forest', 'tropical_rain_forest', 'tropical_rain_forest'],
            ['temperate_desert', 'grassland', 'grassland',
             'temperate_deciduous_forest', 'temperate_deciduous_forest', 'temperate_rain_forest'],
            ['temperate_desert', 'temperate_desert', 'shrubland',
             'shrubland', 'taiga', 'taiga'],
            ['scorched', 'bare', 'tundra', 'snow', 'snow', 'snow'],
        ]
        for elevation, row in enumerate(diagram):
            for moisture, biome_name in enumerate(row):
                self.add_rule(biome_name, moisture, elevation)

        # Minimum heights and fade distances for elevation zones 1-3.
        self.set_min_height(100.0, 3)
        self.set_fade_dist(1, 3)
        self.set_min_height(80.0, 2)
        self.set_fade_dist(1, 2)
        self.set_min_height(10.0, 1)
        self.set_fade_dist(1, 1)
|
en
| 0.82842
|
# Copyright (c) 2020 - The Procedural Generation for Gazebo authors # For information on the respective copyright owner see the NOTICE file # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Set biomes for elevation #0 # Set biomes for elevation #1 # Set biomes for elevation #2 # Set biomes for elevation #3
| 2.262902
| 2
|
modules/constants.py
|
lydell/userscript-proxy
| 1
|
6625641
|
<reponame>lydell/userscript-proxy<filename>modules/constants.py
# Prefix used when rendering the version string (e.g. "v" + "0.11.0").
VERSION_PREFIX: str = "v"
APP_NAME: str = "Userscript Proxy"
VERSION: str = "0.11.0"
# HTML attribute name carrying the proxy version -- presumably injected into
# proxied pages (NOTE(review): confirm against the injection code).
ATTRIBUTE_UP_VERSION: str = "data-userscript-proxy-version"
# Default port the proxy listens on.
DEFAULT_PORT: int = 8080
# Default directory searched for userscript files.
DEFAULT_USERSCRIPTS_DIR: str = "userscripts"
# Query-string parameter that disables userscripts for a request --
# inferred from the name; confirm where it is read.
DEFAULT_QUERY_PARAM_TO_DISABLE: str = "nouserscripts"
|
# Prefix used when rendering the version string (e.g. "v" + "0.11.0").
VERSION_PREFIX: str = "v"
APP_NAME: str = "Userscript Proxy"
VERSION: str = "0.11.0"
# HTML attribute name carrying the proxy version -- presumably injected into
# proxied pages (NOTE(review): confirm against the injection code).
ATTRIBUTE_UP_VERSION: str = "data-userscript-proxy-version"
# Default port the proxy listens on.
DEFAULT_PORT: int = 8080
# Default directory searched for userscript files.
DEFAULT_USERSCRIPTS_DIR: str = "userscripts"
# Query-string parameter that disables userscripts for a request --
# inferred from the name; confirm where it is read.
DEFAULT_QUERY_PARAM_TO_DISABLE: str = "nouserscripts"
|
none
| 1
| 1.246026
| 1
|
|
ckeditor/views.py
|
redwerk/django-ckeditor
| 0
|
6625642
|
from datetime import datetime
import os
from django.conf import settings
from django.core.files.storage import default_storage
from django.views.decorators.csrf import csrf_exempt
from django.views import generic
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from ckeditor import image_processing
from ckeditor import utils
def get_upload_filename(upload_name, user):
    """Build a storage path for an uploaded file.

    Path layout: CKEDITOR_UPLOAD_PATH [/ username when restricted] / Y/m/d /
    filename (optionally slugified). Returns a name guaranteed available in
    the default storage backend.
    """
    # Per-user subdirectory only when uploads are restricted by user.
    if getattr(settings, 'CKEDITOR_RESTRICT_BY_USER', False):
        user_path = user.username
    else:
        user_path = ''
    # Date-based subdirectory, e.g. "2024/01/31".
    date_path = datetime.now().strftime('%Y/%m/%d')
    upload_path = os.path.join(settings.CKEDITOR_UPLOAD_PATH, user_path, date_path)
    if getattr(settings, "CKEDITOR_UPLOAD_SLUGIFY_FILENAME", True):
        upload_name = utils.slugify_filename(upload_name)
    return default_storage.get_available_name(os.path.join(upload_path, upload_name))
class ImageUploadView(generic.View):
    """Receive a CKEditor file upload and reply with a JS callback snippet."""
    http_method_names = ['post']

    def post(self, request, **kwargs):
        """
        Uploads a file and send back its URL to CKEditor.
        """
        # Get the uploaded file from request.
        upload = request.FILES['upload']
        # SECURITY: CKEditorFuncNum is interpolated into an inline <script>
        # block. Coerce it to an int so a crafted query value cannot inject
        # script into the response (it is always numeric for legitimate use).
        func_num = int(request.GET['CKEditorFuncNum'])
        # Verify that file is a valid image.
        backend = image_processing.get_backend()
        try:
            backend.image_verify(upload)
        except utils.NotAnImageException:
            return HttpResponse("""
            <script type='text/javascript'>
            alert('Invalid image')
            window.parent.CKEDITOR.tools.callFunction({0});
            </script>""".format(func_num))
        # Open output file in which to store upload.
        upload_filename = get_upload_filename(upload.name, request.user)
        saved_path = default_storage.save(upload_filename, upload)
        if backend.should_create_thumbnail(saved_path):
            backend.create_thumbnail(saved_path)
        url = utils.get_media_url(saved_path)
        # Respond with Javascript sending ckeditor upload url.
        return HttpResponse("""
        <script type='text/javascript'>
            window.parent.CKEDITOR.tools.callFunction({0}, '{1}');
        </script>""".format(func_num, url))
# CSRF-exempt view callable for the upload endpoint -- presumably because
# CKEditor's upload form does not carry Django's CSRF token (confirm).
upload = csrf_exempt(ImageUploadView.as_view())
def get_image_files(user=None, path=''):
    """
    Recursively walks all dirs under upload dir and generates a list of
    full paths for each file found.
    """
    STORAGE_DIRECTORIES = 0
    STORAGE_FILES = 1
    # If a user is provided and CKEDITOR_RESTRICT_BY_USER is True,
    # limit images to user specific path, but not for superusers.
    restricted = getattr(settings, 'CKEDITOR_RESTRICT_BY_USER', False)
    user_path = user.username if (user and not user.is_superuser and restricted) else ''
    browse_path = os.path.join(settings.CKEDITOR_UPLOAD_PATH, user_path, path)

    try:
        entries = default_storage.listdir(browse_path)
    except (NotImplementedError, OSError):
        # Storage backends without listdir support, or unreadable paths,
        # simply yield nothing.
        return

    for name in entries[STORAGE_FILES]:
        # Skip generated thumbnails and hidden files.
        if os.path.splitext(name)[0].endswith('_thumb') or os.path.basename(name).startswith('.'):
            continue
        yield os.path.join(browse_path, name)

    for subdir in entries[STORAGE_DIRECTORIES]:
        if subdir.startswith('.'):
            continue
        for found in get_image_files(user=user, path=os.path.join(path, subdir)):
            yield found
def get_files_browse_urls(user=None):
    """
    Recursively walks all dirs under upload dir and generates a list of
    thumbnail and full image URL's for each file found.
    """
    entries = []
    # Thumbnails only exist when an image backend is configured.
    has_backend = getattr(settings, 'CKEDITOR_IMAGE_BACKEND', None)
    for filename in get_image_files(user=user):
        src = utils.get_media_url(filename)
        thumb = utils.get_media_url(utils.get_thumb_filename(filename)) if has_backend else src
        entries.append({
            'thumb': thumb,
            'src': src,
            'is_image': is_image(src)
        })
    return entries
def is_image(path):
    """Return True when *path*'s (case-insensitive) extension is a web image."""
    extension = path.rsplit('.', 1)[-1].lower()
    return extension in ('jpg', 'jpeg', 'png', 'gif')
def browse(request):
    """Render the image-browse page listing the requesting user's uploads."""
    template_context = RequestContext(request, {
        'files': get_files_browse_urls(request.user),
    })
    return render_to_response('browse.html', template_context)
|
from datetime import datetime
import os
from django.conf import settings
from django.core.files.storage import default_storage
from django.views.decorators.csrf import csrf_exempt
from django.views import generic
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from ckeditor import image_processing
from ckeditor import utils
def get_upload_filename(upload_name, user):
    """Build a storage path for an uploaded file.

    Path layout: CKEDITOR_UPLOAD_PATH [/ username when restricted] / Y/m/d /
    filename (optionally slugified). Returns a name guaranteed available in
    the default storage backend.
    """
    # Per-user subdirectory only when uploads are restricted by user.
    if getattr(settings, 'CKEDITOR_RESTRICT_BY_USER', False):
        user_path = user.username
    else:
        user_path = ''
    # Date-based subdirectory, e.g. "2024/01/31".
    date_path = datetime.now().strftime('%Y/%m/%d')
    upload_path = os.path.join(settings.CKEDITOR_UPLOAD_PATH, user_path, date_path)
    if getattr(settings, "CKEDITOR_UPLOAD_SLUGIFY_FILENAME", True):
        upload_name = utils.slugify_filename(upload_name)
    return default_storage.get_available_name(os.path.join(upload_path, upload_name))
class ImageUploadView(generic.View):
    """Receive a CKEditor file upload and reply with a JS callback snippet."""
    http_method_names = ['post']

    def post(self, request, **kwargs):
        """
        Uploads a file and send back its URL to CKEditor.
        """
        # Get the uploaded file from request.
        upload = request.FILES['upload']
        # SECURITY: CKEditorFuncNum is interpolated into an inline <script>
        # block. Coerce it to an int so a crafted query value cannot inject
        # script into the response (it is always numeric for legitimate use).
        func_num = int(request.GET['CKEditorFuncNum'])
        # Verify that file is a valid image.
        backend = image_processing.get_backend()
        try:
            backend.image_verify(upload)
        except utils.NotAnImageException:
            return HttpResponse("""
            <script type='text/javascript'>
            alert('Invalid image')
            window.parent.CKEDITOR.tools.callFunction({0});
            </script>""".format(func_num))
        # Open output file in which to store upload.
        upload_filename = get_upload_filename(upload.name, request.user)
        saved_path = default_storage.save(upload_filename, upload)
        if backend.should_create_thumbnail(saved_path):
            backend.create_thumbnail(saved_path)
        url = utils.get_media_url(saved_path)
        # Respond with Javascript sending ckeditor upload url.
        return HttpResponse("""
        <script type='text/javascript'>
            window.parent.CKEDITOR.tools.callFunction({0}, '{1}');
        </script>""".format(func_num, url))
# CSRF-exempt view callable for the upload endpoint -- presumably because
# CKEditor's upload form does not carry Django's CSRF token (confirm).
upload = csrf_exempt(ImageUploadView.as_view())
def get_image_files(user=None, path=''):
    """
    Recursively walks all dirs under upload dir and generates a list of
    full paths for each file found.
    """
    STORAGE_DIRECTORIES = 0
    STORAGE_FILES = 1
    # If a user is provided and CKEDITOR_RESTRICT_BY_USER is True,
    # limit images to user specific path, but not for superusers.
    restricted = getattr(settings, 'CKEDITOR_RESTRICT_BY_USER', False)
    user_path = user.username if (user and not user.is_superuser and restricted) else ''
    browse_path = os.path.join(settings.CKEDITOR_UPLOAD_PATH, user_path, path)

    try:
        entries = default_storage.listdir(browse_path)
    except (NotImplementedError, OSError):
        # Storage backends without listdir support, or unreadable paths,
        # simply yield nothing.
        return

    for name in entries[STORAGE_FILES]:
        # Skip generated thumbnails and hidden files.
        if os.path.splitext(name)[0].endswith('_thumb') or os.path.basename(name).startswith('.'):
            continue
        yield os.path.join(browse_path, name)

    for subdir in entries[STORAGE_DIRECTORIES]:
        if subdir.startswith('.'):
            continue
        for found in get_image_files(user=user, path=os.path.join(path, subdir)):
            yield found
def get_files_browse_urls(user=None):
    """
    Recursively walks all dirs under upload dir and generates a list of
    thumbnail and full image URL's for each file found.
    """
    entries = []
    # Thumbnails only exist when an image backend is configured.
    has_backend = getattr(settings, 'CKEDITOR_IMAGE_BACKEND', None)
    for filename in get_image_files(user=user):
        src = utils.get_media_url(filename)
        thumb = utils.get_media_url(utils.get_thumb_filename(filename)) if has_backend else src
        entries.append({
            'thumb': thumb,
            'src': src,
            'is_image': is_image(src)
        })
    return entries
def is_image(path):
    """Return True when *path*'s (case-insensitive) extension is a web image."""
    extension = path.rsplit('.', 1)[-1].lower()
    return extension in ('jpg', 'jpeg', 'png', 'gif')
def browse(request):
    """Render the image-browse page listing the requesting user's uploads."""
    template_context = RequestContext(request, {
        'files': get_files_browse_urls(request.user),
    })
    return render_to_response('browse.html', template_context)
|
en
| 0.865042
|
# If CKEDITOR_RESTRICT_BY_USER is True upload file to user specific path. # Generate date based path to put uploaded file. # Complete upload path (upload_path + date_path). Uploads a file and send back its URL to CKEditor. # Get the uploaded file from request. #Verify that file is a valid image <script type='text/javascript'> alert('Invalid image') window.parent.CKEDITOR.tools.callFunction({0}); </script> # Open output file in which to store upload. # Respond with Javascript sending ckeditor upload url. <script type='text/javascript'> window.parent.CKEDITOR.tools.callFunction({0}, '{1}'); </script> Recursively walks all dirs under upload dir and generates a list of full paths for each file found. # If a user is provided and CKEDITOR_RESTRICT_BY_USER is True, # limit images to user specific path, but not for superusers. Recursively walks all dirs under upload dir and generates a list of thumbnail and full image URL's for each file found.
| 2.287966
| 2
|
src/tests/test_engine.py
|
bieniekmateusz/forcebalance
| 0
|
6625643
|
from __future__ import absolute_import
from builtins import zip
from builtins import range
import pytest
from forcebalance.nifty import *
from forcebalance.gmxio import GMX
from forcebalance.tinkerio import TINKER
from forcebalance.openmmio import OpenMM
from collections import OrderedDict
from .__init__ import ForceBalanceTestCase
# Set SAVEDATA to True and run the tests in order to save data
# to a file for future reference. This is easier to use for troubleshooting
# vs. comparing multiple programs against each other, b/c we don't know
# which one changed.
SAVEDATA: bool = False
class TestAmber99SB(ForceBalanceTestCase):
""" Amber99SB unit test consisting of ten structures of
ACE-ALA-NME interacting with ACE-GLU-NME. The tests check for
whether the OpenMM, GMX, and TINKER Engines produce consistent
results for:
1) Single-point energies and forces over all ten structures
2) Minimized energies and RMSD from the initial geometry for a selected structure
3) Interaction energies between the two molecules over all ten structures
4) Multipole moments of a selected structure
5) Multipole moments of a selected structure after geometry optimization
6) Normal modes of a selected structure
7) Normal modes of a selected structure after geometry optimization
If the engines are setting up the calculation correctly, then the
remaining differences between results are due to differences in
the parameter files or software implementations.
The criteria in this unit test are more stringent than normal
simulations. In order for the software packages to agree to
within the criteria, I had to do the following:
- Remove improper dihedrals from the force field, because there is
an ambiguity in the atom ordering which leads to force differences
- Increase the number of decimal points in the "fudgeQQ" parameter
in the GROMACS .itp file
- Increase two torsional barriers to ensure optimizer converges
to the same local minimum consistently
- Change the "electric" conversion factor in the TINKER .prm file
- Compile GROMACS in double precision
Residual errors are as follows:
Potential energies: <0.01 kJ/mol (<1e-4 fractional error)
Forces: <0.1 kJ/mol/nm (<1e-3 fractional error)
Energy of optimized geometry: < 0.001 kcal/mol
RMSD from starting structure: < 0.001 Angstrom
Interaction energies: < 0.0001 kcal/mol
Multipole moments: < 0.001 Debye / Debye Angstrom
Multipole moments (optimized): < 0.01 Debye / Debye Angstrom
Vibrational frequencies: < 0.5 wavenumber (~ 1e-4 fractional error)
Vibrational eigenvectors: < 0.05 (on 11/2019, updated these)
"""
    @classmethod
    def setup_class(cls):
        """Set up shared engines and a scratch working directory for the class.

        Locates TINKER and a double-precision GROMACS binary on PATH, symlinks
        the required force-field/coordinate files into a temp folder, changes
        into it, and builds one Engine per available package in
        ``cls.engines``. Missing packages are skipped with a warning so the
        individual tests can decide whether to skip.
        """
        super(TestAmber99SB, cls).setup_class()
        tinkerpath = which('testgrad')
        # try to find mdrun_d or gmx_d
        # gmx should be built with config -DGMX_DOUBLE=ON
        gmxpath = which('mdrun_d') or which('gmx_d')
        gmxsuffix = '_d'
        # Tests will FAIL if use single precision gromacs
        # gmxpath = which('mdrun') or which('gmx')
        # gmxsuffix = ''
        # self.logger.debug("\nBuilding options for target...\n")
        cls.cwd = os.path.dirname(os.path.realpath(__file__))
        os.chdir(os.path.join(cls.cwd, "files", "amber_alaglu"))
        cls.tmpfolder = os.path.join(cls.cwd, "files", "amber_alaglu", "temp")
        if not os.path.exists(cls.tmpfolder):
            os.makedirs(cls.tmpfolder)
        os.chdir(cls.tmpfolder)
        # Symlink every input file into the temp folder so engines run in
        # isolation without copying the fixtures.
        for i in ["topol.top", "shot.mdp", "a99sb.xml", "a99sb.prm", "all.gro", "all.arc", "AceGluNme.itp", "AceAlaNme.itp", "a99sb.itp"]:
            os.system("ln -fs ../%s" % i)
        cls.engines = OrderedDict()
        # Set up GMX engine
        if gmxpath != '':
            cls.engines['GMX'] = GMX(coords="all.gro", gmx_top="topol.top", gmx_mdp="shot.mdp", gmxpath=gmxpath, gmxsuffix=gmxsuffix)
        else:
            logger.warn("GROMACS cannot be found, skipping GMX tests.")
        # Set up TINKER engine
        if tinkerpath != '':
            cls.engines['TINKER'] = TINKER(coords="all.arc", tinker_key="alaglu.key", tinkerpath=tinkerpath)
        else:
            logger.warn("TINKER cannot be found, skipping TINKER tests.")
        # Set up OpenMM engine
        try:
            import simtk.openmm
            cls.engines['OpenMM'] = OpenMM(coords="all.gro", pdb="conf.pdb", ffxml="a99sb.xml", platname="Reference", precision="double")
        except:
            logger.warn("OpenMM cannot be imported, skipping OpenMM tests.")
    @classmethod
    def teardown_class(cls):
        """Restore the original working directory after the class's tests.

        The temp folder is intentionally left on disk (cleanup kept commented
        out) so failed runs can be inspected.
        """
        os.chdir(cls.cwd)
        # shutil.rmtree(cls.cwd, "files", "amber_alaglu", "temp")
    def setup_method(self):
        # Every test runs from the temp folder where input files were linked
        # by setup_class.
        os.chdir(self.tmpfolder)
def test_energy_force(self):
""" Test GMX, OpenMM, and TINKER energy and forces using AMBER force field """
printcool("Test GMX, OpenMM, and TINKER energy and forces using AMBER force field")
missing_pkgs = []
for eng in ['TINKER', 'GMX', 'OpenMM']:
if eng not in self.engines:
missing_pkgs.append(eng)
if len(missing_pkgs) > 0:
pytest.skip("Missing packages: %s" % ', '.join(missing_pkgs))
Data = OrderedDict()
for name, eng in self.engines.items():
Data[name] = eng.energy_force()
datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
if SAVEDATA:
fout = os.path.join(datadir, 'test_energy_force.dat')
if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
np.savetxt(fout, Data[list(self.engines.keys())[0]])
fin = os.path.join(datadir, 'test_energy_force.dat')
RefData = np.loadtxt(fin)
for n1 in self.engines.keys():
np.testing.assert_allclose(Data[n1][:,0], RefData[:,0], rtol=0, atol=0.01,
err_msg="%s energies do not match the reference" % (n1))
np.testing.assert_allclose(Data[n1][:,1:].flatten(), RefData[:,1:].flatten(),
rtol=0, atol=0.1, err_msg="%s forces do not match the reference" % (n1))
def test_optimized_geometries(self):
""" Test GMX, OpenMM, and TINKER optimized geometries and RMSD using AMBER force field """
printcool("Test GMX, OpenMM, and TINKER optimized geometries and RMSD using AMBER force field")
missing_pkgs = []
for eng in ['TINKER', 'GMX', 'OpenMM']:
if eng not in self.engines:
missing_pkgs.append(eng)
if len(missing_pkgs) > 0:
pytest.skip("Missing packages: %s" % ', '.join(missing_pkgs))
Data = OrderedDict()
for name, eng in self.engines.items():
Data[name] = eng.energy_rmsd(5)
datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
if SAVEDATA:
fout = os.path.join(datadir, 'test_optimized_geometries.dat')
if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
np.savetxt(fout, Data[list(self.engines.keys())[0]])
fin = os.path.join(datadir, 'test_optimized_geometries.dat')
RefData = np.loadtxt(fin)
for n1 in self.engines.keys():
print("%s vs Reference energies:" % n1, Data[n1][0], RefData[0])
for n1 in self.engines.keys():
np.testing.assert_allclose(Data[n1][0], RefData[0], rtol=0, atol=0.001,
err_msg="%s optimized energies do not match the reference" % n1)
np.testing.assert_allclose(Data[n1][1], RefData[1], rtol=0, atol=0.001,
err_msg="%s RMSD from starting structure do not match the reference" % n1)
def test_interaction_energies(self):
""" Test GMX, OpenMM, and TINKER interaction energies using AMBER force field """
printcool("Test GMX, OpenMM, and TINKER interaction energies using AMBER force field")
missing_pkgs = []
for eng in ['TINKER', 'GMX', 'OpenMM']:
if eng not in self.engines:
missing_pkgs.append(eng)
if len(missing_pkgs) > 0:
pytest.skip("Missing packages: %s" % ', '.join(missing_pkgs))
Data = OrderedDict()
for name, eng in self.engines.items():
Data[name] = eng.interaction_energy(fraga=list(range(22)), fragb=list(range(22, 49)))
datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
if SAVEDATA:
fout = os.path.join(datadir, 'test_interaction_energies.dat')
if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
np.savetxt(fout, Data[list(self.engines.keys())[0]])
fin = os.path.join(datadir, 'test_interaction_energies.dat')
RefData = np.loadtxt(fin)
for n1 in self.engines.keys():
np.testing.assert_allclose(Data[n1], RefData, rtol=0, atol=0.0001,
err_msg="%s interaction energies do not match the reference" % n1)
def test_multipole_moments(self):
""" Test GMX, OpenMM, and TINKER multipole moments using AMBER force field """
printcool("Test GMX, OpenMM, and TINKER multipole moments using AMBER force field")
missing_pkgs = []
for eng in ['TINKER', 'GMX', 'OpenMM']:
if eng not in self.engines:
missing_pkgs.append(eng)
if len(missing_pkgs) > 0:
pytest.skip("Missing packages: %s" % ', '.join(missing_pkgs))
Data = OrderedDict()
for name, eng in self.engines.items():
Data[name] = eng.multipole_moments(shot=5, optimize=False)
datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
if SAVEDATA:
fout = os.path.join(datadir, 'test_multipole_moments.dipole.dat')
if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
np.savetxt(fout, np.array(list(Data[list(self.engines.keys())[0]]['dipole'].values())))
fout = os.path.join(datadir, 'test_multipole_moments.quadrupole.dat')
np.savetxt(fout, np.array(list(Data[list(self.engines.keys())[0]]['quadrupole'].values())))
RefDip = np.loadtxt(os.path.join(datadir, 'test_multipole_moments.dipole.dat'))
RefQuad = np.loadtxt(os.path.join(datadir, 'test_multipole_moments.quadrupole.dat'))
for n1 in self.engines.keys():
d1 = np.array(list(Data[n1]['dipole'].values()))
q1 = np.array(list(Data[n1]['quadrupole'].values()))
np.testing.assert_allclose(d1, RefDip, rtol=0, atol=0.001, err_msg="%s dipole moments do not match the reference" % n1)
np.testing.assert_allclose(q1, RefQuad, rtol=0, atol=0.001, err_msg="%s quadrupole moments do not match the reference" % n1)
def test_multipole_moments_optimized(self):
""" Test GMX, OpenMM, and TINKER multipole moments at optimized geometries """
#==================================================#
#| Geometry-optimized multipole moments; requires |#
#| double precision in order to pass! |#
#==================================================#
printcool("Test GMX, OpenMM, and TINKER multipole moments at optimized geometries")
missing_pkgs = []
for eng in ['TINKER', 'GMX', 'OpenMM']:
if eng not in self.engines:
missing_pkgs.append(eng)
if len(missing_pkgs) > 0:
pytest.skip("Missing packages: %s" % ', '.join(missing_pkgs))
Data = OrderedDict()
for name, eng in self.engines.items():
Data[name] = eng.multipole_moments(shot=5, optimize=True)
datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
if SAVEDATA:
fout = os.path.join(datadir, 'test_multipole_moments_optimized.dipole.dat')
if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
np.savetxt(fout, np.array(list(Data[list(self.engines.keys())[0]]['dipole'].values())))
fout = os.path.join(datadir, 'test_multipole_moments_optimized.quadrupole.dat')
np.savetxt(fout, np.array(list(Data[list(self.engines.keys())[0]]['quadrupole'].values())))
RefDip = np.loadtxt(os.path.join(datadir, 'test_multipole_moments_optimized.dipole.dat'))
RefQuad = np.loadtxt(os.path.join(datadir, 'test_multipole_moments_optimized.quadrupole.dat'))
for n1 in self.engines.keys():
d1 = np.array(list(Data[n1]['dipole'].values()))
q1 = np.array(list(Data[n1]['quadrupole'].values()))
np.testing.assert_allclose(d1, RefDip, rtol=0, atol=0.02, err_msg="%s dipole moments at optimized geometry do not match the reference" % n1)
np.testing.assert_allclose(q1, RefQuad, rtol=0, atol=0.02, err_msg="%s quadrupole moments at optimized geometry do not match the reference" % n1)
def test_normal_modes(self):
""" Test GMX TINKER and OpenMM normal modes """
printcool("Test GMX, TINKER, OpenMM normal modes")
missing_pkgs = []
for eng in ['TINKER', 'GMX', 'OpenMM']:
if eng not in self.engines:
missing_pkgs.append(eng)
if len(missing_pkgs) > 0:
pytest.skip("Missing packages: %s" % ', '.join(missing_pkgs))
FreqG, ModeG = self.engines['GMX'].normal_modes(shot=5, optimize=False)
FreqT, ModeT = self.engines['TINKER'].normal_modes(shot=5, optimize=False)
FreqO, ModeO = self.engines['OpenMM'].normal_modes(shot=5, optimize=False)
datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
if SAVEDATA:
fout = os.path.join(datadir, 'test_normal_modes.freq.dat')
if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
np.savetxt(fout, FreqT)
fout = os.path.join(datadir, 'test_normal_modes.mode.dat.npy')
# Need to save as binary data since it's a multidimensional array
np.save(fout, ModeT)
FreqRef = np.loadtxt(os.path.join(datadir, 'test_normal_modes.freq.dat'))
ModeRef = np.load(os.path.join(datadir, 'test_normal_modes.mode.dat.npy'))
for Freq, Mode, Name in [(FreqG, ModeG, 'GMX'), (FreqT, ModeT, 'TINKER'), (FreqO, ModeO, 'OpenMM')]:
iv = -1
for v, vr, m, mr in zip(Freq, FreqRef, Mode, ModeRef):
iv += 1
# Count vibrational modes. Stochastic issue seems to occur for a mode within the lowest 3.
if vr < 0: continue# or iv < 3: continue
# Frequency tolerance is half a wavenumber.
np.testing.assert_allclose(v, vr, rtol=0, atol=0.5,
err_msg="%s vibrational frequencies do not match the reference" % Name)
delta = 0.05
for a in range(len(m)):
try:
np.testing.assert_allclose(m[a], mr[a], rtol=0, atol=delta,
err_msg="%s normal modes do not match the reference" % Name)
except:
np.testing.assert_allclose(m[a], -1.0*mr[a], rtol=0, atol=delta,
err_msg="%s normal modes do not match the reference" % Name)
def test_normal_modes_optimized(self):
""" Test GMX TINKER and OpenMM normal modes at optimized geometry """
printcool("Test GMX, TINKER, OpenMM normal modes at optimized geometry")
missing_pkgs = []
for eng in ['TINKER', 'GMX', 'OpenMM']:
if eng not in self.engines:
missing_pkgs.append(eng)
if len(missing_pkgs) > 0:
pytest.skip("Missing packages: %s" % ', '.join(missing_pkgs))
FreqG, ModeG = self.engines['GMX'].normal_modes(shot=5, optimize=True)
FreqT, ModeT = self.engines['TINKER'].normal_modes(shot=5, optimize=True)
FreqO, ModeO = self.engines['OpenMM'].normal_modes(shot=5, optimize=True)
datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
if SAVEDATA:
fout = os.path.join(datadir, 'test_normal_modes_optimized.freq.dat')
if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
np.savetxt(fout, FreqT)
fout = os.path.join(datadir, 'test_normal_modes_optimized.mode.dat')
# Need to save as binary data since it's a multidimensional array
np.save(fout, ModeT)
FreqRef = np.loadtxt(os.path.join(datadir, 'test_normal_modes_optimized.freq.dat'))
ModeRef = np.load(os.path.join(datadir, 'test_normal_modes_optimized.mode.dat.npy'))
for Freq, Mode, Name in [(FreqG, ModeG, 'GMX'), (FreqT, ModeT, 'TINKER'), (FreqO, ModeO, 'OpenMM')]:
iv = -1
for v, vr, m, mr in zip(Freq, FreqRef, Mode, ModeRef):
iv += 1
# Count vibrational modes. Stochastic issue seems to occur for a mode within the lowest 3.
if vr < 0: continue# or iv < 3: continue
# Frequency tolerance is half a wavenumber.
np.testing.assert_allclose(v, vr, rtol=0, atol=0.5,
err_msg="%s vibrational frequencies do not match the reference" % Name)
delta = 0.05
for a in range(len(m)):
try:
np.testing.assert_allclose(m[a], mr[a], rtol=0, atol=delta,
err_msg="%s normal modes do not match the reference" % Name)
except:
np.testing.assert_allclose(m[a], -1.0*mr[a], rtol=0, atol=delta,
err_msg="%s normal modes do not match the reference" % Name)
class TestAmoebaWater6(ForceBalanceTestCase):
    """ AMOEBA unit test consisting of a water hexamer. The test
    checks for whether the OpenMM and TINKER Engines produce
    consistent results for:
    1) Single-point energy and force
    2) Minimized energies and RMSD from the initial geometry
    3) Interaction energies between two groups of molecules
    4) Multipole moments
    5) Multipole moments after geometry optimization
    Due to careful validation of OpenMM, the results agree with TINKER
    to within very stringent criteria. Residual errors are as follows:
    Potential energies: <0.001 kJ/mol (<1e-5 fractional error)
    Forces: <0.01 kJ/mol/nm (<1e-4 fractional error)
    Energy of optimized geometry: < 0.0001 kcal/mol
    RMSD from starting structure: < 0.001 Angstrom
    Interaction energies: < 0.0001 kcal/mol
    Multipole moments: < 0.001 Debye / Debye Angstrom
    Multipole moments (optimized): < 0.01 Debye / Debye Angstrom
    """
    @classmethod
    def setup_class(cls):
        super(TestAmoebaWater6, cls).setup_class()
        #self.logger.debug("\nBuilding options for target...\n")
        # Run everything inside a temp folder with symlinks to the input files.
        cls.cwd = os.path.dirname(os.path.realpath(__file__))
        os.chdir(os.path.join(cls.cwd, "files", "amoeba_h2o6"))
        cls.tmpfolder = os.path.join(cls.cwd, "files", "amoeba_h2o6", "temp")
        if not os.path.exists(cls.tmpfolder):
            os.makedirs(cls.tmpfolder)
        os.chdir(cls.tmpfolder)
        os.system("ln -s ../prism.pdb")
        os.system("ln -s ../prism.key")
        os.system("ln -s ../hex.arc")
        os.system("ln -s ../water.prm")
        os.system("ln -s ../amoebawater.xml")
        cls.O = OpenMM(coords="hex.arc", pdb="prism.pdb", ffxml="amoebawater.xml", precision="double", \
                           mmopts={'rigidWater':False, 'mutualInducedTargetEpsilon':1e-6})
        # The TINKER engine (cls.T) is only created when TINKER is on the PATH;
        # tests check hasattr(self, 'T') and skip otherwise.
        tinkerpath = which('testgrad')
        if tinkerpath:
            cls.T = TINKER(coords="hex.arc", tinker_key="prism.key", tinkerpath=tinkerpath)
    @classmethod
    def teardown_class(cls):
        """
        teardown any state that was previously setup with a call to setup_class.
        """
        os.chdir(cls.cwd)
        # shutil.rmtree(cls.cwd, "files", "amoeba_h2o6", "temp")
    def setup_method(self):
        os.chdir(self.tmpfolder)
    def test_energy_force(self):
        """ Test OpenMM and TINKER energy and forces with AMOEBA force field """
        printcool("Testing OpenMM and TINKER energy and force with AMOEBA")
        if not hasattr(self, 'T'):
            pytest.skip("TINKER programs are not in the PATH.")
        EF_O = self.O.energy_force()[0]
        EF_T = self.T.energy_force()[0]
        datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
        if SAVEDATA:
            fout = os.path.join(datadir, 'test_energy_force.dat')
            if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
            np.savetxt(fout, EF_T)
        EF_R = np.loadtxt(os.path.join(datadir, 'test_energy_force.dat'))
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct AMOEBA energy to within 0.001 kJ\n")
        np.testing.assert_allclose(EF_O[0], EF_R[0],
                                   err_msg="OpenMM energy does not match the reference", rtol=0, atol=0.001)
        np.testing.assert_allclose(EF_T[0], EF_R[0],
                                   err_msg="TINKER energy does not match the reference", rtol=0, atol=0.001)
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct AMOEBA force to within 0.01 kJ/mol/nm\n")
        np.testing.assert_allclose(EF_O[1:], EF_R[1:],
                                   err_msg="OpenMM forces do not match the reference", rtol=0, atol=0.01)
        np.testing.assert_allclose(EF_T[1:], EF_R[1:],
                                   err_msg="TINKER forces do not match the reference", rtol=0, atol=0.01)
    def test_energy_rmsd(self):
        """ Test OpenMM and TINKER optimized geometries with AMOEBA force field """
        pytest.skip("Need to reduce dependence on the TINKER build")
        printcool("Testing OpenMM and TINKER optimized geometry with AMOEBA")
        if not hasattr(self, 'T'):
            pytest.skip("TINKER programs are not in the PATH.")
        EO, RO = self.O.energy_rmsd()
        ET, RT = self.T.energy_rmsd()
        datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
        if SAVEDATA:
            fout = os.path.join(datadir, 'test_energy_rmsd.dat')
            if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
            np.savetxt(fout, np.array([ET, RT]))
        # BUGFIX: the reference data must be loaded with np.loadtxt; previously
        # RefData was just the path string, so RefData[0] indexed a character.
        RefData = np.loadtxt(os.path.join(datadir, 'test_energy_rmsd.dat'))
        ERef = RefData[0]
        RRef = RefData[1]
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct minimized energy to within 0.0001 kcal\n")
        np.testing.assert_allclose(EO, ERef,
                                   err_msg="OpenMM minimized energy does not match the reference", rtol=0, atol=0.0001)
        np.testing.assert_allclose(ET, ERef,
                                   err_msg="TINKER minimized energy does not match the reference", rtol=0, atol=0.0001)
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct RMSD to starting structure\n")
        np.testing.assert_allclose(RO, RRef,
                                   err_msg="OpenMM RMSD does not match the reference", rtol=0, atol=0.001)
        np.testing.assert_allclose(RT, RRef,
                                   err_msg="TINKER RMSD does not match the reference", rtol=0, atol=0.001)
    def test_interaction_energy(self):
        """ Test OpenMM and TINKER interaction energies with AMOEBA force field """
        printcool("Testing OpenMM and TINKER interaction energy with AMOEBA")
        if not hasattr(self, 'T'):
            pytest.skip("TINKER programs are not in the PATH.")
        # Interaction energy between the first three waters and the other three.
        IO = self.O.interaction_energy(fraga=list(range(9)), fragb=list(range(9, 18)))
        IT = self.T.interaction_energy(fraga=list(range(9)), fragb=list(range(9, 18)))
        datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
        if SAVEDATA:
            fout = os.path.join(datadir, 'test_interaction_energy.dat')
            if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
            np.savetxt(fout, np.array([IT]))
        IR = np.loadtxt(os.path.join(datadir, 'test_interaction_energy.dat'))
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct interaction energy\n")
        np.testing.assert_allclose(IO, IR,
                                   err_msg="OpenMM interaction energies do not match the reference", rtol=0, atol=0.0001)
        np.testing.assert_allclose(IT, IR,
                                   err_msg="TINKER interaction energies do not match the reference", rtol=0, atol=0.0001)
    def test_multipole_moments(self):
        """ Test OpenMM and TINKER multipole moments with AMOEBA force field """
        printcool("Testing OpenMM and TINKER multipole moments with AMOEBA")
        if not hasattr(self, 'T'):
            pytest.skip("TINKER programs are not in the PATH.")
        MO = self.O.multipole_moments(optimize=False)
        DO = np.array(list(MO['dipole'].values()))
        QO = np.array(list(MO['quadrupole'].values()))
        MT = self.T.multipole_moments(optimize=False)
        DT = np.array(list(MT['dipole'].values()))
        QT = np.array(list(MT['quadrupole'].values()))
        datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
        if SAVEDATA:
            fout = os.path.join(datadir, 'test_multipole_moments.dipole.dat')
            if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
            np.savetxt(fout, DT)
            fout = os.path.join(datadir, 'test_multipole_moments.quadrupole.dat')
            np.savetxt(fout, QT)
        DR = np.loadtxt(os.path.join(datadir, 'test_multipole_moments.dipole.dat'))
        QR = np.loadtxt(os.path.join(datadir, 'test_multipole_moments.quadrupole.dat'))
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct dipole\n")
        np.testing.assert_allclose(DO, DR,
                                   err_msg="OpenMM dipoles do not match the reference", rtol=0, atol=0.001)
        np.testing.assert_allclose(DT, DR,
                                   err_msg="TINKER dipoles do not match the reference", rtol=0, atol=0.001)
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct quadrupole\n")
        np.testing.assert_allclose(QO, QR,
                                   err_msg="OpenMM quadrupoles do not match the reference", rtol=0, atol=0.001)
        np.testing.assert_allclose(QT, QR,
                                   err_msg="TINKER quadrupoles do not match the reference", rtol=0, atol=0.001)
    def test_multipole_moments_optimized(self):
        """ Test OpenMM and TINKER multipole moments with AMOEBA force field """
        pytest.skip("Need to reduce dependence on the TINKER build")
        printcool("Testing OpenMM and TINKER multipole moments with AMOEBA")
        if not hasattr(self, 'T'):
            pytest.skip("TINKER programs are not in the PATH.")
        MO1 = self.O.multipole_moments(optimize=True)
        DO1 = np.array(list(MO1['dipole'].values()))
        QO1 = np.array(list(MO1['quadrupole'].values()))
        MT1 = self.T.multipole_moments(optimize=True)
        DT1 = np.array(list(MT1['dipole'].values()))
        QT1 = np.array(list(MT1['quadrupole'].values()))
        datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
        if SAVEDATA:
            fout = os.path.join(datadir, 'test_multipole_moments_optimized.dipole.dat')
            if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
            np.savetxt(fout, DT1)
            fout = os.path.join(datadir, 'test_multipole_moments_optimized.quadrupole.dat')
            np.savetxt(fout, QT1)
        DR1 = np.loadtxt(os.path.join(datadir, 'test_multipole_moments_optimized.dipole.dat'))
        QR1 = np.loadtxt(os.path.join(datadir, 'test_multipole_moments_optimized.quadrupole.dat'))
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct dipole when geometries are optimized\n")
        np.testing.assert_allclose(DO1, DR1, rtol=0, atol=0.001,
                                   err_msg="OpenMM dipoles do not match the reference when geometries are optimized")
        np.testing.assert_allclose(DT1, DR1, rtol=0, atol=0.001,
                                   err_msg="TINKER dipoles do not match the reference when geometries are optimized")
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct quadrupole when geometries are optimized\n")
        np.testing.assert_allclose(QO1, QR1, rtol=0, atol=0.01,
                                   err_msg="OpenMM quadrupoles do not match the reference when geometries are optimized")
        np.testing.assert_allclose(QT1, QR1, rtol=0, atol=0.01,
                                   err_msg="TINKER quadrupoles do not match the reference when geometries are optimized")
# ---- (stray '|' removed: concatenation artifact between file copies) ----
from __future__ import absolute_import
from builtins import zip
from builtins import range
import pytest
from forcebalance.nifty import *
from forcebalance.gmxio import GMX
from forcebalance.tinkerio import TINKER
from forcebalance.openmmio import OpenMM
from collections import OrderedDict
from .__init__ import ForceBalanceTestCase
# Set SAVEDATA to True and run the tests in order to save data
# to a file for future reference. This is easier to use for troubleshooting
# vs. comparing multiple programs against each other, b/c we don't know
# which one changed.
# When False (the default), tests only load previously saved reference data.
SAVEDATA=False
class TestAmber99SB(ForceBalanceTestCase):
""" Amber99SB unit test consisting of ten structures of
ACE-ALA-NME interacting with ACE-GLU-NME. The tests check for
whether the OpenMM, GMX, and TINKER Engines produce consistent
results for:
1) Single-point energies and forces over all ten structures
2) Minimized energies and RMSD from the initial geometry for a selected structure
3) Interaction energies between the two molecules over all ten structures
4) Multipole moments of a selected structure
5) Multipole moments of a selected structure after geometry optimization
6) Normal modes of a selected structure
7) Normal modes of a selected structure after geometry optimization
If the engines are setting up the calculation correctly, then the
remaining differences between results are due to differences in
the parameter files or software implementations.
The criteria in this unit test are more stringent than normal
simulations. In order for the software packages to agree to
within the criteria, I had to do the following:
- Remove improper dihedrals from the force field, because there is
an ambiguity in the atom ordering which leads to force differences
- Increase the number of decimal points in the "fudgeQQ" parameter
in the GROMACS .itp file
- Increase two torsional barriers to ensure optimizer converges
to the same local minimum consistently
- Change the "electric" conversion factor in the TINKER .prm file
- Compile GROMACS in double precision
Residual errors are as follows:
Potential energies: <0.01 kJ/mol (<1e-4 fractional error)
Forces: <0.1 kJ/mol/nm (<1e-3 fractional error)
Energy of optimized geometry: < 0.001 kcal/mol
RMSD from starting structure: < 0.001 Angstrom
Interaction energies: < 0.0001 kcal/mol
Multipole moments: < 0.001 Debye / Debye Angstrom
Multipole moments (optimized): < 0.01 Debye / Debye Angstrom
Vibrational frequencies: < 0.5 wavenumber (~ 1e-4 fractional error)
Vibrational eigenvectors: < 0.05 (on 11/2019, updated these)
"""
@classmethod
def setup_class(cls):
"""
setup any state specific to the execution of the given class (which usually contains tests).
"""
super(TestAmber99SB, cls).setup_class()
tinkerpath = which('testgrad')
# try to find mdrun_d or gmx_d
# gmx should be built with config -DGMX_DOUBLE=ON
gmxpath = which('mdrun_d') or which('gmx_d')
gmxsuffix = '_d'
# Tests will FAIL if use single precision gromacs
# gmxpath = which('mdrun') or which('gmx')
# gmxsuffix = ''
# self.logger.debug("\nBuilding options for target...\n")
cls.cwd = os.path.dirname(os.path.realpath(__file__))
os.chdir(os.path.join(cls.cwd, "files", "amber_alaglu"))
cls.tmpfolder = os.path.join(cls.cwd, "files", "amber_alaglu", "temp")
if not os.path.exists(cls.tmpfolder):
os.makedirs(cls.tmpfolder)
os.chdir(cls.tmpfolder)
for i in ["topol.top", "shot.mdp", "a99sb.xml", "a99sb.prm", "all.gro", "all.arc", "AceGluNme.itp", "AceAlaNme.itp", "a99sb.itp"]:
os.system("ln -fs ../%s" % i)
cls.engines = OrderedDict()
# Set up GMX engine
if gmxpath != '':
cls.engines['GMX'] = GMX(coords="all.gro", gmx_top="topol.top", gmx_mdp="shot.mdp", gmxpath=gmxpath, gmxsuffix=gmxsuffix)
else:
logger.warn("GROMACS cannot be found, skipping GMX tests.")
# Set up TINKER engine
if tinkerpath != '':
cls.engines['TINKER'] = TINKER(coords="all.arc", tinker_key="alaglu.key", tinkerpath=tinkerpath)
else:
logger.warn("TINKER cannot be found, skipping TINKER tests.")
# Set up OpenMM engine
try:
import simtk.openmm
cls.engines['OpenMM'] = OpenMM(coords="all.gro", pdb="conf.pdb", ffxml="a99sb.xml", platname="Reference", precision="double")
except:
logger.warn("OpenMM cannot be imported, skipping OpenMM tests.")
@classmethod
def teardown_class(cls):
"""
teardown any state that was previously setup with a call to setup_class.
"""
os.chdir(cls.cwd)
# shutil.rmtree(cls.cwd, "files", "amber_alaglu", "temp")
def setup_method(self):
os.chdir(self.tmpfolder)
def test_energy_force(self):
""" Test GMX, OpenMM, and TINKER energy and forces using AMBER force field """
printcool("Test GMX, OpenMM, and TINKER energy and forces using AMBER force field")
missing_pkgs = []
for eng in ['TINKER', 'GMX', 'OpenMM']:
if eng not in self.engines:
missing_pkgs.append(eng)
if len(missing_pkgs) > 0:
pytest.skip("Missing packages: %s" % ', '.join(missing_pkgs))
Data = OrderedDict()
for name, eng in self.engines.items():
Data[name] = eng.energy_force()
datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
if SAVEDATA:
fout = os.path.join(datadir, 'test_energy_force.dat')
if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
np.savetxt(fout, Data[list(self.engines.keys())[0]])
fin = os.path.join(datadir, 'test_energy_force.dat')
RefData = np.loadtxt(fin)
for n1 in self.engines.keys():
np.testing.assert_allclose(Data[n1][:,0], RefData[:,0], rtol=0, atol=0.01,
err_msg="%s energies do not match the reference" % (n1))
np.testing.assert_allclose(Data[n1][:,1:].flatten(), RefData[:,1:].flatten(),
rtol=0, atol=0.1, err_msg="%s forces do not match the reference" % (n1))
def test_optimized_geometries(self):
""" Test GMX, OpenMM, and TINKER optimized geometries and RMSD using AMBER force field """
printcool("Test GMX, OpenMM, and TINKER optimized geometries and RMSD using AMBER force field")
missing_pkgs = []
for eng in ['TINKER', 'GMX', 'OpenMM']:
if eng not in self.engines:
missing_pkgs.append(eng)
if len(missing_pkgs) > 0:
pytest.skip("Missing packages: %s" % ', '.join(missing_pkgs))
Data = OrderedDict()
for name, eng in self.engines.items():
Data[name] = eng.energy_rmsd(5)
datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
if SAVEDATA:
fout = os.path.join(datadir, 'test_optimized_geometries.dat')
if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
np.savetxt(fout, Data[list(self.engines.keys())[0]])
fin = os.path.join(datadir, 'test_optimized_geometries.dat')
RefData = np.loadtxt(fin)
for n1 in self.engines.keys():
print("%s vs Reference energies:" % n1, Data[n1][0], RefData[0])
for n1 in self.engines.keys():
np.testing.assert_allclose(Data[n1][0], RefData[0], rtol=0, atol=0.001,
err_msg="%s optimized energies do not match the reference" % n1)
np.testing.assert_allclose(Data[n1][1], RefData[1], rtol=0, atol=0.001,
err_msg="%s RMSD from starting structure do not match the reference" % n1)
def test_interaction_energies(self):
""" Test GMX, OpenMM, and TINKER interaction energies using AMBER force field """
printcool("Test GMX, OpenMM, and TINKER interaction energies using AMBER force field")
missing_pkgs = []
for eng in ['TINKER', 'GMX', 'OpenMM']:
if eng not in self.engines:
missing_pkgs.append(eng)
if len(missing_pkgs) > 0:
pytest.skip("Missing packages: %s" % ', '.join(missing_pkgs))
Data = OrderedDict()
for name, eng in self.engines.items():
Data[name] = eng.interaction_energy(fraga=list(range(22)), fragb=list(range(22, 49)))
datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
if SAVEDATA:
fout = os.path.join(datadir, 'test_interaction_energies.dat')
if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
np.savetxt(fout, Data[list(self.engines.keys())[0]])
fin = os.path.join(datadir, 'test_interaction_energies.dat')
RefData = np.loadtxt(fin)
for n1 in self.engines.keys():
np.testing.assert_allclose(Data[n1], RefData, rtol=0, atol=0.0001,
err_msg="%s interaction energies do not match the reference" % n1)
def test_multipole_moments(self):
""" Test GMX, OpenMM, and TINKER multipole moments using AMBER force field """
printcool("Test GMX, OpenMM, and TINKER multipole moments using AMBER force field")
missing_pkgs = []
for eng in ['TINKER', 'GMX', 'OpenMM']:
if eng not in self.engines:
missing_pkgs.append(eng)
if len(missing_pkgs) > 0:
pytest.skip("Missing packages: %s" % ', '.join(missing_pkgs))
Data = OrderedDict()
for name, eng in self.engines.items():
Data[name] = eng.multipole_moments(shot=5, optimize=False)
datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
if SAVEDATA:
fout = os.path.join(datadir, 'test_multipole_moments.dipole.dat')
if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
np.savetxt(fout, np.array(list(Data[list(self.engines.keys())[0]]['dipole'].values())))
fout = os.path.join(datadir, 'test_multipole_moments.quadrupole.dat')
np.savetxt(fout, np.array(list(Data[list(self.engines.keys())[0]]['quadrupole'].values())))
RefDip = np.loadtxt(os.path.join(datadir, 'test_multipole_moments.dipole.dat'))
RefQuad = np.loadtxt(os.path.join(datadir, 'test_multipole_moments.quadrupole.dat'))
for n1 in self.engines.keys():
d1 = np.array(list(Data[n1]['dipole'].values()))
q1 = np.array(list(Data[n1]['quadrupole'].values()))
np.testing.assert_allclose(d1, RefDip, rtol=0, atol=0.001, err_msg="%s dipole moments do not match the reference" % n1)
np.testing.assert_allclose(q1, RefQuad, rtol=0, atol=0.001, err_msg="%s quadrupole moments do not match the reference" % n1)
def test_multipole_moments_optimized(self):
""" Test GMX, OpenMM, and TINKER multipole moments at optimized geometries """
#==================================================#
#| Geometry-optimized multipole moments; requires |#
#| double precision in order to pass! |#
#==================================================#
printcool("Test GMX, OpenMM, and TINKER multipole moments at optimized geometries")
missing_pkgs = []
for eng in ['TINKER', 'GMX', 'OpenMM']:
if eng not in self.engines:
missing_pkgs.append(eng)
if len(missing_pkgs) > 0:
pytest.skip("Missing packages: %s" % ', '.join(missing_pkgs))
Data = OrderedDict()
for name, eng in self.engines.items():
Data[name] = eng.multipole_moments(shot=5, optimize=True)
datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
if SAVEDATA:
fout = os.path.join(datadir, 'test_multipole_moments_optimized.dipole.dat')
if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
np.savetxt(fout, np.array(list(Data[list(self.engines.keys())[0]]['dipole'].values())))
fout = os.path.join(datadir, 'test_multipole_moments_optimized.quadrupole.dat')
np.savetxt(fout, np.array(list(Data[list(self.engines.keys())[0]]['quadrupole'].values())))
RefDip = np.loadtxt(os.path.join(datadir, 'test_multipole_moments_optimized.dipole.dat'))
RefQuad = np.loadtxt(os.path.join(datadir, 'test_multipole_moments_optimized.quadrupole.dat'))
for n1 in self.engines.keys():
d1 = np.array(list(Data[n1]['dipole'].values()))
q1 = np.array(list(Data[n1]['quadrupole'].values()))
np.testing.assert_allclose(d1, RefDip, rtol=0, atol=0.02, err_msg="%s dipole moments at optimized geometry do not match the reference" % n1)
np.testing.assert_allclose(q1, RefQuad, rtol=0, atol=0.02, err_msg="%s quadrupole moments at optimized geometry do not match the reference" % n1)
def test_normal_modes(self):
""" Test GMX TINKER and OpenMM normal modes """
printcool("Test GMX, TINKER, OpenMM normal modes")
missing_pkgs = []
for eng in ['TINKER', 'GMX', 'OpenMM']:
if eng not in self.engines:
missing_pkgs.append(eng)
if len(missing_pkgs) > 0:
pytest.skip("Missing packages: %s" % ', '.join(missing_pkgs))
FreqG, ModeG = self.engines['GMX'].normal_modes(shot=5, optimize=False)
FreqT, ModeT = self.engines['TINKER'].normal_modes(shot=5, optimize=False)
FreqO, ModeO = self.engines['OpenMM'].normal_modes(shot=5, optimize=False)
datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
if SAVEDATA:
fout = os.path.join(datadir, 'test_normal_modes.freq.dat')
if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
np.savetxt(fout, FreqT)
fout = os.path.join(datadir, 'test_normal_modes.mode.dat.npy')
# Need to save as binary data since it's a multidimensional array
np.save(fout, ModeT)
FreqRef = np.loadtxt(os.path.join(datadir, 'test_normal_modes.freq.dat'))
ModeRef = np.load(os.path.join(datadir, 'test_normal_modes.mode.dat.npy'))
for Freq, Mode, Name in [(FreqG, ModeG, 'GMX'), (FreqT, ModeT, 'TINKER'), (FreqO, ModeO, 'OpenMM')]:
iv = -1
for v, vr, m, mr in zip(Freq, FreqRef, Mode, ModeRef):
iv += 1
# Count vibrational modes. Stochastic issue seems to occur for a mode within the lowest 3.
if vr < 0: continue# or iv < 3: continue
# Frequency tolerance is half a wavenumber.
np.testing.assert_allclose(v, vr, rtol=0, atol=0.5,
err_msg="%s vibrational frequencies do not match the reference" % Name)
delta = 0.05
for a in range(len(m)):
try:
np.testing.assert_allclose(m[a], mr[a], rtol=0, atol=delta,
err_msg="%s normal modes do not match the reference" % Name)
except:
np.testing.assert_allclose(m[a], -1.0*mr[a], rtol=0, atol=delta,
err_msg="%s normal modes do not match the reference" % Name)
def test_normal_modes_optimized(self):
    """Test that GMX, TINKER and OpenMM normal modes at the optimized geometry
    agree with the stored reference.

    Frequencies must match to within 0.5 cm^-1; eigenvector components to
    within 0.05, allowing for an overall sign flip of each mode.
    """
    printcool("Test GMX, TINKER, OpenMM normal modes at optimized geometry")
    missing_pkgs = []
    for eng in ['TINKER', 'GMX', 'OpenMM']:
        if eng not in self.engines:
            missing_pkgs.append(eng)
    if len(missing_pkgs) > 0:
        pytest.skip("Missing packages: %s" % ', '.join(missing_pkgs))
    FreqG, ModeG = self.engines['GMX'].normal_modes(shot=5, optimize=True)
    FreqT, ModeT = self.engines['TINKER'].normal_modes(shot=5, optimize=True)
    FreqO, ModeO = self.engines['OpenMM'].normal_modes(shot=5, optimize=True)
    datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
    if SAVEDATA:
        fout = os.path.join(datadir, 'test_normal_modes_optimized.freq.dat')
        if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
        np.savetxt(fout, FreqT)
        # Need to save as binary data since it's a multidimensional array.
        # Spell out the '.npy' suffix so the path matches the loader below
        # (np.save would append it implicitly anyway, so the file is the same).
        fout = os.path.join(datadir, 'test_normal_modes_optimized.mode.dat.npy')
        np.save(fout, ModeT)
    FreqRef = np.loadtxt(os.path.join(datadir, 'test_normal_modes_optimized.freq.dat'))
    ModeRef = np.load(os.path.join(datadir, 'test_normal_modes_optimized.mode.dat.npy'))
    for Freq, Mode, Name in [(FreqG, ModeG, 'GMX'), (FreqT, ModeT, 'TINKER'), (FreqO, ModeO, 'OpenMM')]:
        iv = -1
        for v, vr, m, mr in zip(Freq, FreqRef, Mode, ModeRef):
            iv += 1
            # Count vibrational modes. Stochastic issue seems to occur for a mode within the lowest 3.
            if vr < 0: continue# or iv < 3: continue
            # Frequency tolerance is half a wavenumber.
            np.testing.assert_allclose(v, vr, rtol=0, atol=0.5,
                                       err_msg="%s vibrational frequencies do not match the reference" % Name)
            delta = 0.05
            for a in range(len(m)):
                try:
                    np.testing.assert_allclose(m[a], mr[a], rtol=0, atol=delta,
                                               err_msg="%s normal modes do not match the reference" % Name)
                # Eigenvectors are only defined up to an overall sign; retry
                # with the sign flipped.  Catch AssertionError specifically
                # (a bare except would also swallow KeyboardInterrupt).
                except AssertionError:
                    np.testing.assert_allclose(m[a], -1.0*mr[a], rtol=0, atol=delta,
                                               err_msg="%s normal modes do not match the reference" % Name)
class TestAmoebaWater6(ForceBalanceTestCase):
    """ AMOEBA unit test consisting of a water hexamer. The test
    checks for whether the OpenMM and TINKER Engines produce
    consistent results for:
    1) Single-point energy and force
    2) Minimized energies and RMSD from the initial geometry
    3) Interaction energies between two groups of molecules
    4) Multipole moments
    5) Multipole moments after geometry optimization
    Due to careful validation of OpenMM, the results agree with TINKER
    to within very stringent criteria. Residual errors are as follows:
    Potential energies: <0.001 kJ/mol (<1e-5 fractional error)
    Forces: <0.01 kJ/mol/nm (<1e-4 fractional error)
    Energy of optimized geometry: < 0.0001 kcal/mol
    RMSD from starting structure: < 0.001 Angstrom
    Interaction energies: < 0.0001 kcal/mol
    Multipole moments: < 0.001 Debye / Debye Angstrom
    Multipole moments (optimized): < 0.01 Debye / Debye Angstrom
    """

    @classmethod
    def setup_class(cls):
        """Create a scratch folder, symlink the input files into it, and
        construct the OpenMM (and, if available, TINKER) engines."""
        super(TestAmoebaWater6, cls).setup_class()
        #self.logger.debug("\nBuilding options for target...\n")
        cls.cwd = os.path.dirname(os.path.realpath(__file__))
        os.chdir(os.path.join(cls.cwd, "files", "amoeba_h2o6"))
        cls.tmpfolder = os.path.join(cls.cwd, "files", "amoeba_h2o6", "temp")
        if not os.path.exists(cls.tmpfolder):
            os.makedirs(cls.tmpfolder)
        os.chdir(cls.tmpfolder)
        os.system("ln -s ../prism.pdb")
        os.system("ln -s ../prism.key")
        os.system("ln -s ../hex.arc")
        os.system("ln -s ../water.prm")
        os.system("ln -s ../amoebawater.xml")
        cls.O = OpenMM(coords="hex.arc", pdb="prism.pdb", ffxml="amoebawater.xml", precision="double", \
                       mmopts={'rigidWater':False, 'mutualInducedTargetEpsilon':1e-6})
        # Only build the TINKER engine when the TINKER binaries are on PATH;
        # tests check hasattr(self, 'T') and skip when it is absent.
        tinkerpath = which('testgrad')
        if tinkerpath:
            cls.T = TINKER(coords="hex.arc", tinker_key="prism.key", tinkerpath=tinkerpath)

    @classmethod
    def teardown_class(cls):
        """
        teardown any state that was previously setup with a call to setup_class.
        """
        os.chdir(cls.cwd)
        # shutil.rmtree(cls.cwd, "files", "amoeba_h2o6", "temp")

    def setup_method(self):
        os.chdir(self.tmpfolder)

    def test_energy_force(self):
        """ Test OpenMM and TINKER energy and forces with AMOEBA force field """
        printcool("Testing OpenMM and TINKER energy and force with AMOEBA")
        if not hasattr(self, 'T'):
            pytest.skip("TINKER programs are not in the PATH.")
        EF_O = self.O.energy_force()[0]
        EF_T = self.T.energy_force()[0]
        datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
        if SAVEDATA:
            fout = os.path.join(datadir, 'test_energy_force.dat')
            if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
            np.savetxt(fout, EF_T)
        EF_R = np.loadtxt(os.path.join(datadir, 'test_energy_force.dat'))
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct AMOEBA energy to within 0.001 kJ\n")
        np.testing.assert_allclose(EF_O[0], EF_R[0],
                                   err_msg="OpenMM energy does not match the reference", rtol=0, atol=0.001)
        np.testing.assert_allclose(EF_T[0], EF_R[0],
                                   err_msg="TINKER energy does not match the reference", rtol=0, atol=0.001)
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct AMOEBA force to within 0.01 kJ/mol/nm\n")
        np.testing.assert_allclose(EF_O[1:], EF_R[1:],
                                   err_msg="OpenMM forces do not match the reference", rtol=0, atol=0.01)
        np.testing.assert_allclose(EF_T[1:], EF_R[1:],
                                   err_msg="TINKER forces do not match the reference", rtol=0, atol=0.01)

    def test_energy_rmsd(self):
        """ Test OpenMM and TINKER optimized geometries with AMOEBA force field """
        pytest.skip("Need to reduce dependence on the TINKER build")
        printcool("Testing OpenMM and TINKER optimized geometry with AMOEBA")
        if not hasattr(self, 'T'):
            pytest.skip("TINKER programs are not in the PATH.")
        EO, RO = self.O.energy_rmsd()
        ET, RT = self.T.energy_rmsd()
        datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
        if SAVEDATA:
            fout = os.path.join(datadir, 'test_energy_rmsd.dat')
            if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
            np.savetxt(fout, np.array([ET, RT]))
        # BUG FIX: the reference data must be loaded from the file; previously
        # the bare path string was assigned, so RefData[0]/[1] indexed single
        # characters of the path instead of the stored energy and RMSD.
        RefData = np.loadtxt(os.path.join(datadir, 'test_energy_rmsd.dat'))
        ERef = RefData[0]
        RRef = RefData[1]
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct minimized energy to within 0.0001 kcal\n")
        np.testing.assert_allclose(EO, ERef,
                                   err_msg="OpenMM minimized energy does not match the reference", rtol=0, atol=0.0001)
        np.testing.assert_allclose(ET, ERef,
                                   err_msg="TINKER minimized energy does not match the reference", rtol=0, atol=0.0001)
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct RMSD to starting structure\n")
        np.testing.assert_allclose(RO, RRef,
                                   err_msg="OpenMM RMSD does not match the reference", rtol=0, atol=0.001)
        np.testing.assert_allclose(RT, RRef,
                                   err_msg="TINKER RMSD does not match the reference", rtol=0, atol=0.001)

    def test_interaction_energy(self):
        """ Test OpenMM and TINKER interaction energies with AMOEBA force field """
        printcool("Testing OpenMM and TINKER interaction energy with AMOEBA")
        if not hasattr(self, 'T'):
            pytest.skip("TINKER programs are not in the PATH.")
        # Atoms 0-8 form one group of three waters; atoms 9-17 the other.
        IO = self.O.interaction_energy(fraga=list(range(9)), fragb=list(range(9, 18)))
        IT = self.T.interaction_energy(fraga=list(range(9)), fragb=list(range(9, 18)))
        datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
        if SAVEDATA:
            fout = os.path.join(datadir, 'test_interaction_energy.dat')
            if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
            np.savetxt(fout, np.array([IT]))
        IR = np.loadtxt(os.path.join(datadir, 'test_interaction_energy.dat'))
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct interaction energy\n")
        np.testing.assert_allclose(IO, IR,
                                   err_msg="OpenMM interaction energies do not match the reference", rtol=0, atol=0.0001)
        np.testing.assert_allclose(IT, IR,
                                   err_msg="TINKER interaction energies do not match the reference", rtol=0, atol=0.0001)

    def test_multipole_moments(self):
        """ Test OpenMM and TINKER multipole moments with AMOEBA force field """
        printcool("Testing OpenMM and TINKER multipole moments with AMOEBA")
        if not hasattr(self, 'T'):
            pytest.skip("TINKER programs are not in the PATH.")
        MO = self.O.multipole_moments(optimize=False)
        DO = np.array(list(MO['dipole'].values()))
        QO = np.array(list(MO['quadrupole'].values()))
        MT = self.T.multipole_moments(optimize=False)
        DT = np.array(list(MT['dipole'].values()))
        QT = np.array(list(MT['quadrupole'].values()))
        datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
        if SAVEDATA:
            fout = os.path.join(datadir, 'test_multipole_moments.dipole.dat')
            if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
            np.savetxt(fout, DT)
            fout = os.path.join(datadir, 'test_multipole_moments.quadrupole.dat')
            np.savetxt(fout, QT)
        DR = np.loadtxt(os.path.join(datadir, 'test_multipole_moments.dipole.dat'))
        QR = np.loadtxt(os.path.join(datadir, 'test_multipole_moments.quadrupole.dat'))
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct dipole\n")
        np.testing.assert_allclose(DO, DR,
                                   err_msg="OpenMM dipoles do not match the reference", rtol=0, atol=0.001)
        np.testing.assert_allclose(DT, DR,
                                   err_msg="TINKER dipoles do not match the reference", rtol=0, atol=0.001)
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct quadrupole\n")
        np.testing.assert_allclose(QO, QR,
                                   err_msg="OpenMM quadrupoles do not match the reference", rtol=0, atol=0.001)
        np.testing.assert_allclose(QT, QR,
                                   err_msg="TINKER quadrupoles do not match the reference", rtol=0, atol=0.001)

    def test_multipole_moments_optimized(self):
        """ Test OpenMM and TINKER multipole moments at the optimized geometry with AMOEBA force field """
        pytest.skip("Need to reduce dependence on the TINKER build")
        printcool("Testing OpenMM and TINKER multipole moments with AMOEBA")
        if not hasattr(self, 'T'):
            pytest.skip("TINKER programs are not in the PATH.")
        MO1 = self.O.multipole_moments(optimize=True)
        DO1 = np.array(list(MO1['dipole'].values()))
        QO1 = np.array(list(MO1['quadrupole'].values()))
        MT1 = self.T.multipole_moments(optimize=True)
        DT1 = np.array(list(MT1['dipole'].values()))
        QT1 = np.array(list(MT1['quadrupole'].values()))
        datadir = os.path.join(self.cwd, 'files', 'test_engine', self.__class__.__name__)
        if SAVEDATA:
            fout = os.path.join(datadir, 'test_multipole_moments_optimized.dipole.dat')
            if not os.path.exists(os.path.dirname(fout)): os.makedirs(os.path.dirname(fout))
            np.savetxt(fout, DT1)
            fout = os.path.join(datadir, 'test_multipole_moments_optimized.quadrupole.dat')
            np.savetxt(fout, QT1)
        DR1 = np.loadtxt(os.path.join(datadir, 'test_multipole_moments_optimized.dipole.dat'))
        QR1 = np.loadtxt(os.path.join(datadir, 'test_multipole_moments_optimized.quadrupole.dat'))
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct dipole when geometries are optimized\n")
        np.testing.assert_allclose(DO1, DR1, rtol=0, atol=0.001,
                                   err_msg="OpenMM dipoles do not match the reference when geometries are optimized")
        np.testing.assert_allclose(DT1, DR1, rtol=0, atol=0.001,
                                   err_msg="TINKER dipoles do not match the reference when geometries are optimized")
        #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct quadrupole when geometries are optimized\n")
        np.testing.assert_allclose(QO1, QR1, rtol=0, atol=0.01,
                                   err_msg="OpenMM quadrupoles do not match the reference when geometries are optimized")
        np.testing.assert_allclose(QT1, QR1, rtol=0, atol=0.01,
                                   err_msg="TINKER quadrupoles do not match the reference when geometries are optimized")
|
en
| 0.785083
|
# Set SAVEDATA to True and run the tests in order to save data # to a file for future reference. This is easier to use for troubleshooting # vs. comparing multiple programs against each other, b/c we don't know # which one changed. Amber99SB unit test consisting of ten structures of ACE-ALA-NME interacting with ACE-GLU-NME. The tests check for whether the OpenMM, GMX, and TINKER Engines produce consistent results for: 1) Single-point energies and forces over all ten structures 2) Minimized energies and RMSD from the initial geometry for a selected structure 3) Interaction energies between the two molecules over all ten structures 4) Multipole moments of a selected structure 5) Multipole moments of a selected structure after geometry optimization 6) Normal modes of a selected structure 7) Normal modes of a selected structure after geometry optimization If the engines are setting up the calculation correctly, then the remaining differences between results are due to differences in the parameter files or software implementations. The criteria in this unit test are more stringent than normal simulations. 
In order for the software packages to agree to within the criteria, I had to do the following: - Remove improper dihedrals from the force field, because there is an ambiguity in the atom ordering which leads to force differences - Increase the number of decimal points in the "fudgeQQ" parameter in the GROMACS .itp file - Increase two torsional barriers to ensure optimizer converges to the same local minimum consistently - Change the "electric" conversion factor in the TINKER .prm file - Compile GROMACS in double precision Residual errors are as follows: Potential energies: <0.01 kJ/mol (<1e-4 fractional error) Forces: <0.1 kJ/mol/nm (<1e-3 fractional error) Energy of optimized geometry: < 0.001 kcal/mol RMSD from starting structure: < 0.001 Angstrom Interaction energies: < 0.0001 kcal/mol Multipole moments: < 0.001 Debye / Debye Angstrom Multipole moments (optimized): < 0.01 Debye / Debye Angstrom Vibrational frequencies: < 0.5 wavenumber (~ 1e-4 fractional error) Vibrational eigenvectors: < 0.05 (on 11/2019, updated these) setup any state specific to the execution of the given class (which usually contains tests). # try to find mdrun_d or gmx_d # gmx should be built with config -DGMX_DOUBLE=ON # Tests will FAIL if use single precision gromacs # gmxpath = which('mdrun') or which('gmx') # gmxsuffix = '' # self.logger.debug("\nBuilding options for target...\n") # Set up GMX engine # Set up TINKER engine # Set up OpenMM engine teardown any state that was previously setup with a call to setup_class. 
# shutil.rmtree(cls.cwd, "files", "amber_alaglu", "temp") Test GMX, OpenMM, and TINKER energy and forces using AMBER force field Test GMX, OpenMM, and TINKER optimized geometries and RMSD using AMBER force field Test GMX, OpenMM, and TINKER interaction energies using AMBER force field Test GMX, OpenMM, and TINKER multipole moments using AMBER force field Test GMX, OpenMM, and TINKER multipole moments at optimized geometries #==================================================# #| Geometry-optimized multipole moments; requires |# #| double precision in order to pass! |# #==================================================# Test GMX TINKER and OpenMM normal modes # Need to save as binary data since it's a multidimensional array # Count vibrational modes. Stochastic issue seems to occur for a mode within the lowest 3. # or iv < 3: continue # Frequency tolerance is half a wavenumber. Test GMX TINKER and OpenMM normal modes at optimized geometry # Need to save as binary data since it's a multidimensional array # Count vibrational modes. Stochastic issue seems to occur for a mode within the lowest 3. # or iv < 3: continue # Frequency tolerance is half a wavenumber. AMOEBA unit test consisting of a water hexamer. The test checks for whether the OpenMM and TINKER Engines produce consistent results for: 1) Single-point energy and force 2) Minimized energies and RMSD from the initial geometry 3) Interaction energies between two groups of molecules 4) Multipole moments 5) Multipole moments after geometry optimization Due to careful validation of OpenMM, the results agree with TINKER to within very stringent criteria. 
Residual errors are as follows: Potential energies: <0.001 kJ/mol (<1e-5 fractional error) Forces: <0.01 kJ/mol/nm (<1e-4 fractional error) Energy of optimized geometry: < 0.0001 kcal/mol RMSD from starting structure: < 0.001 Angstrom Interaction energies: < 0.0001 kcal/mol Multipole moments: < 0.001 Debye / Debye Angstrom Multipole moments (optimized): < 0.01 Debye / Debye Angstrom #self.logger.debug("\nBuilding options for target...\n") teardown any state that was previously setup with a call to setup_class. # shutil.rmtree(cls.cwd, "files", "amoeba_h2o6", "temp") Test OpenMM and TINKER energy and forces with AMOEBA force field #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct AMOEBA energy to within 0.001 kJ\n") #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct AMOEBA force to within 0.01 kJ/mol/nm\n") Test OpenMM and TINKER optimized geometries with AMOEBA force field #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct minimized energy to within 0.0001 kcal\n") #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct RMSD to starting structure\n") Test OpenMM and TINKER interaction energies with AMOEBA force field #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct interaction energy\n") Test OpenMM and TINKER multipole moments with AMOEBA force field #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct dipole\n") #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct quadrupole\n") Test OpenMM and TINKER multipole moments with AMOEBA force field #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct dipole when geometries are optimized\n") #self.logger.debug(">ASSERT OpenMM and TINKER Engines give the correct quadrupole when geometries are optimized\n")
| 1.823166
| 2
|
setup.py
|
foxik/pybox2d
| 0
|
6625644
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup script for pybox2d.
For installation instructions, see INSTALL.
Basic install steps:
python setup.py build
If that worked, then:
python setup.py install
"""
import os
import sys
from glob import glob

__author__ = '<NAME>'
__license__ = 'zlib'

import setuptools
from setuptools import (setup, Extension)
from setuptools.command.build_py import build_py


class build_py_after_build_ext(build_py):
    """build_py variant that runs build_ext first, so the SWIG-generated
    Python wrapper module exists before the pure-Python package is collected."""
    def run(self):
        self.run_command('build_ext')
        return super().run()


# release version number
box2d_version = '2.3'
release_number = 10

# create the version string
version_str = "%s.%s" % (box2d_version, release_number)

# setup some paths and names
library_base = 'library'  # the directory where the egg base will be for setuptools develop command
library_name = 'Box2D'    # the final name that the library should end up being
library_path = os.path.join(library_base, library_name)  # library/Box2D (e.g.)
source_dir = 'Box2D'      # where all of the C++ and SWIG source resides
swig_source = 'Box2D.i'   # the main SWIG source file
use_kwargs = True         # whether or not to default creating kwargs for all functions


def write_init():
    """Generate library/Box2D/__init__.py: license header, star-import of the
    SWIG module, and version metadata."""
    # read in the license header; 'with' guarantees the handle is closed
    # (the original left this file object unclosed)
    with open(os.path.join(source_dir, 'pybox2d_license_header.txt')) as header_file:
        license_header = header_file.read()
    init_source = [
        "from .%s import *" % library_name,
        "__version__ = '%s'" % version_str,
        "__version_info__ = (%s,%d)" % (box2d_version.replace('.', ','), release_number),
        "__license__ = '%s'" % __license__,
    ]
    # and create the __init__ file with the appropriate version string
    with open(os.path.join(library_path, '__init__.py'), 'w') as f:
        f.write(license_header)
        f.write('\n'.join(init_source))


source_paths = [
    os.path.join(source_dir, 'Dynamics'),
    os.path.join(source_dir, 'Dynamics', 'Contacts'),
    os.path.join(source_dir, 'Dynamics', 'Joints'),
    os.path.join(source_dir, 'Common'),
    os.path.join(source_dir, 'Collision'),
    os.path.join(source_dir, 'Collision', 'Shapes'),
]

# glob all of the paths and then flatten the list into one
box2d_source_files = ([os.path.join(source_dir, 'Box2D_wrap.cpp')] +  # [os.path.join(source_dir, swig_source)] +
                      sum([glob(os.path.join(path, "*.cpp")) for path in source_paths], [])
                      )

# arguments to pass to SWIG. for old versions of SWIG, -O (optimize) might not be present.
# Defaults:
#  -O optimize, -includeall follow all include statements, -globals changes cvar->b2Globals
#  -Isource_dir adds source dir to include path, -outdir library_path sets the output directory
#  -small makes the Box2D_wrap.cpp file almost unreadable, but faster to compile. If you want
#   to try to understand it for whatever reason, I'd recommend removing that option.
swig_arguments = \
    '-c++ -I%s -small -O -includeall -ignoremissing -w201 -globals b2Globals -outdir %s' \
    % (source_dir, library_path)

if use_kwargs:
    # turn off the warnings about functions that can't use kwargs (-w511)
    # and let the wrapper know we're using kwargs (-D_SWIG_KWARGS)
    swig_arguments += " -keyword -w511 -D_SWIG_KWARGS"

# depending on the platform, add extra compilation arguments. hopefully if the platform
# isn't windows, g++ will be used; -Wno-unused then would suppress some annoying warnings
# about the Box2D source.
if sys.platform in ('win32', 'win64'):
    extra_args = ['-I.', '-fpermissive']
else:
    extra_args = ['-I.', '-Wno-unused']

pybox2d_extension = Extension(
    'Box2D._Box2D', box2d_source_files, extra_compile_args=extra_args,
    language='c++')

LONG_DESCRIPTION = """
Custom-build wheels of pybox2d library.
pybox2d homepage: https://github.com/pybox2d/pybox2d
Box2D homepage: http://www.box2d.org
""".strip()

CLASSIFIERS = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: zlib/libpng License",
    "Operating System :: Microsoft :: Windows",
    "Operating System :: MacOS :: MacOS X",
    "Operating System :: POSIX",
    "Programming Language :: Python",
    "Topic :: Software Development :: Libraries :: Python Modules",
    "Topic :: Software Development :: Libraries :: pygame",
]

write_init()
print(setuptools.find_packages('library'))

setup_dict = dict(
    name = "ufal.pybox2d",
    version = version_str,
    author = "<NAME>",
    author_email = "<EMAIL>",
    description = "Custom-build wheels of pybox2d library from http://github.com/pybox2d/pybox2d",
    license = "zlib",
    url = "http://github.com/foxik/pybox2d",
    long_description = LONG_DESCRIPTION,
    package_dir = {'': 'library'},
    packages = setuptools.find_packages(library_base),
    test_suite = 'tests',
    options = { 'build_ext': { 'swig_opts' : swig_arguments },
                'egg_info' : { 'egg_base' : library_base },
              },
    ext_modules = [ pybox2d_extension ],
    include_package_data=True,
    cmdclass={"build_py": build_py_after_build_ext},
)

# run the actual setup (setuptools)
setup(**setup_dict)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Setup script for pybox2d.
For installation instructions, see INSTALL.
Basic install steps:
python setup.py build
If that worked, then:
python setup.py install
"""
import os
import sys
from glob import glob

__author__ = '<NAME>'
__license__ = 'zlib'

import setuptools
from setuptools import (setup, Extension)
from setuptools.command.build_py import build_py


class build_py_after_build_ext(build_py):
    """build_py variant that runs build_ext first, so the SWIG-generated
    Python wrapper module exists before the pure-Python package is collected."""
    def run(self):
        self.run_command('build_ext')
        return super().run()


# release version number
box2d_version = '2.3'
release_number = 10

# create the version string
version_str = "%s.%s" % (box2d_version, release_number)

# setup some paths and names
library_base = 'library'  # the directory where the egg base will be for setuptools develop command
library_name = 'Box2D'    # the final name that the library should end up being
library_path = os.path.join(library_base, library_name)  # library/Box2D (e.g.)
source_dir = 'Box2D'      # where all of the C++ and SWIG source resides
swig_source = 'Box2D.i'   # the main SWIG source file
use_kwargs = True         # whether or not to default creating kwargs for all functions


def write_init():
    """Generate library/Box2D/__init__.py: license header, star-import of the
    SWIG module, and version metadata."""
    # read in the license header; 'with' guarantees the handle is closed
    # (the original left this file object unclosed)
    with open(os.path.join(source_dir, 'pybox2d_license_header.txt')) as header_file:
        license_header = header_file.read()
    init_source = [
        "from .%s import *" % library_name,
        "__version__ = '%s'" % version_str,
        "__version_info__ = (%s,%d)" % (box2d_version.replace('.', ','), release_number),
        "__license__ = '%s'" % __license__,
    ]
    # and create the __init__ file with the appropriate version string
    with open(os.path.join(library_path, '__init__.py'), 'w') as f:
        f.write(license_header)
        f.write('\n'.join(init_source))


source_paths = [
    os.path.join(source_dir, 'Dynamics'),
    os.path.join(source_dir, 'Dynamics', 'Contacts'),
    os.path.join(source_dir, 'Dynamics', 'Joints'),
    os.path.join(source_dir, 'Common'),
    os.path.join(source_dir, 'Collision'),
    os.path.join(source_dir, 'Collision', 'Shapes'),
]

# glob all of the paths and then flatten the list into one
box2d_source_files = ([os.path.join(source_dir, 'Box2D_wrap.cpp')] +  # [os.path.join(source_dir, swig_source)] +
                      sum([glob(os.path.join(path, "*.cpp")) for path in source_paths], [])
                      )

# arguments to pass to SWIG. for old versions of SWIG, -O (optimize) might not be present.
# Defaults:
#  -O optimize, -includeall follow all include statements, -globals changes cvar->b2Globals
#  -Isource_dir adds source dir to include path, -outdir library_path sets the output directory
#  -small makes the Box2D_wrap.cpp file almost unreadable, but faster to compile. If you want
#   to try to understand it for whatever reason, I'd recommend removing that option.
swig_arguments = \
    '-c++ -I%s -small -O -includeall -ignoremissing -w201 -globals b2Globals -outdir %s' \
    % (source_dir, library_path)

if use_kwargs:
    # turn off the warnings about functions that can't use kwargs (-w511)
    # and let the wrapper know we're using kwargs (-D_SWIG_KWARGS)
    swig_arguments += " -keyword -w511 -D_SWIG_KWARGS"

# depending on the platform, add extra compilation arguments. hopefully if the platform
# isn't windows, g++ will be used; -Wno-unused then would suppress some annoying warnings
# about the Box2D source.
if sys.platform in ('win32', 'win64'):
    extra_args = ['-I.', '-fpermissive']
else:
    extra_args = ['-I.', '-Wno-unused']

pybox2d_extension = Extension(
    'Box2D._Box2D', box2d_source_files, extra_compile_args=extra_args,
    language='c++')

LONG_DESCRIPTION = """
Custom-build wheels of pybox2d library.
pybox2d homepage: https://github.com/pybox2d/pybox2d
Box2D homepage: http://www.box2d.org
""".strip()

CLASSIFIERS = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: zlib/libpng License",
    "Operating System :: Microsoft :: Windows",
    "Operating System :: MacOS :: MacOS X",
    "Operating System :: POSIX",
    "Programming Language :: Python",
    "Topic :: Software Development :: Libraries :: Python Modules",
    "Topic :: Software Development :: Libraries :: pygame",
]

write_init()
print(setuptools.find_packages('library'))

setup_dict = dict(
    name = "ufal.pybox2d",
    version = version_str,
    author = "<NAME>",
    author_email = "<EMAIL>",
    description = "Custom-build wheels of pybox2d library from http://github.com/pybox2d/pybox2d",
    license = "zlib",
    url = "http://github.com/foxik/pybox2d",
    long_description = LONG_DESCRIPTION,
    package_dir = {'': 'library'},
    packages = setuptools.find_packages(library_base),
    test_suite = 'tests',
    options = { 'build_ext': { 'swig_opts' : swig_arguments },
                'egg_info' : { 'egg_base' : library_base },
              },
    ext_modules = [ pybox2d_extension ],
    include_package_data=True,
    cmdclass={"build_py": build_py_after_build_ext},
)

# run the actual setup (setuptools)
setup(**setup_dict)
|
en
| 0.781318
|
#!/usr/bin/env python # -*- coding: utf-8 -*- Setup script for pybox2d. For installation instructions, see INSTALL. Basic install steps: python setup.py build If that worked, then: python setup.py install # release version number # create the version string # setup some paths and names # the directory where the egg base will be for setuptools develop command # the final name that the library should end up being # library/Box2D (e.g.) # where all of the C++ and SWIG source resides # the main SWIG source file # whether or not to default creating kwargs for all functions # read in the license header # and create the __init__ file with the appropriate version string # glob all of the paths and then flatten the list into one #[os.path.join(source_dir, swig_source)] + \ # arguments to pass to SWIG. for old versions of SWIG, -O (optimize) might not be present. # Defaults: # -O optimize, -includeall follow all include statements, -globals changes cvar->b2Globals # -Isource_dir adds source dir to include path, -outdir library_path sets the output directory # -small makes the Box2D_wrap.cpp file almost unreadable, but faster to compile. If you want # to try to understand it for whatever reason, I'd recommend removing that option. # turn off the warnings about functions that can't use kwargs (-w511) # and let the wrapper know we're using kwargs (-D_SWIG_KWARGS) # depending on the platform, add extra compilation arguments. hopefully if the platform # isn't windows, g++ will be used; -Wno-unused then would suppress some annoying warnings # about the Box2D source. Custom-build wheels of pybox2d library. pybox2d homepage: https://github.com/pybox2d/pybox2d Box2D homepage: http://www.box2d.org # run the actual setup from distutils
| 2.459792
| 2
|
blog/migrations/0007_auto_20200418_2041.py
|
AndriiOshtuk/MDN-DIY-mini-blog
| 0
|
6625645
|
# Generated by Django 3.0.3 on 2020-04-18 17:41
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter blog.Post.post_date to a DateField with a default.

    NOTE(review): the default is the literal date 2020-04-18 (the day the
    migration was generated), not a callable such as datetime.date.today —
    presumably Django captured a one-off default when makemigrations
    prompted; confirm the model field itself uses a callable default.
    """

    dependencies = [
        ('blog', '0006_auto_20200418_2034'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='post_date',
            field=models.DateField(default=datetime.date(2020, 4, 18)),
        ),
    ]
|
# Generated by Django 3.0.3 on 2020-04-18 17:41
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter blog.Post.post_date to a DateField with a default.

    NOTE(review): the default is the literal date 2020-04-18 (the day the
    migration was generated), not a callable such as datetime.date.today —
    presumably Django captured a one-off default when makemigrations
    prompted; confirm the model field itself uses a callable default.
    """

    dependencies = [
        ('blog', '0006_auto_20200418_2034'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='post_date',
            field=models.DateField(default=datetime.date(2020, 4, 18)),
        ),
    ]
|
en
| 0.805719
|
# Generated by Django 3.0.3 on 2020-04-18 17:41
| 1.637966
| 2
|
src/python/pants/task/scm_publish_mixin.py
|
revl/pants
| 1
|
6625646
|
<reponame>revl/pants<filename>src/python/pants/task/scm_publish_mixin.py
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
import traceback
from abc import abstractmethod
from functools import total_ordering
from pants.base.exceptions import TaskError
from pants.scm.scm import Scm
class Version:
    """Base type for version strings; see the Semver and Namedver subclasses.

    NOTE(review): @abstractmethod below has no enforcement effect because
    this class does not derive from abc.ABC (or use ABCMeta) — instantiating
    Version directly is not blocked; confirm whether that is intentional.
    """

    @staticmethod
    def parse(version):
        """Attempts to parse the given string as Semver, then falls back to Namedver."""
        try:
            return Semver.parse(version)
        except ValueError:
            # Not a semantic version; Namedver.parse raises ValueError itself
            # if the string is not a valid named version either.
            return Namedver.parse(version)

    @abstractmethod
    def version(self):
        """Returns the string representation of this Version."""
@total_ordering
class Namedver(Version):
    """A less restrictive versioning scheme that does not conflict with Semver.

    It's important to not allow certain characters that are used in maven for
    performing range matching like *><=! See:
    https://maven.apache.org/enforcer/enforcer-rules/versionRanges.html
    """

    # At least one character, all drawn from [-_.A-Za-z0-9] (so no whitespace).
    _VALID_NAME = re.compile("^[-_.A-Za-z0-9]+$")
    # Matches names composed solely of punctuation (or empty) — rejected below.
    _INVALID_NAME = re.compile("^[-_.]*$")

    @classmethod
    def parse(cls, version):
        """Parse `version` as a named version.

        Raises ValueError if the string contains illegal characters, has no
        alphanumeric character, or is a valid semantic version (named and
        semantic versions must be disjoint).
        """
        # must not contain whitespace
        if not cls._VALID_NAME.match(version):
            raise ValueError(
                "Named versions must match {}: '{}'".format(cls._VALID_NAME.pattern, version)
            )
        if cls._INVALID_NAME.match(version):
            raise ValueError("Named version must contain at least one alphanumeric character")
        # must not be valid semver
        try:
            Semver.parse(version)
        except ValueError:
            return Namedver(version)
        else:
            raise ValueError(
                "Named versions must not be valid semantic versions: '{0}'".format(version)
            )

    def __init__(self, version):
        self._version = version

    def version(self):
        """Returns the raw version string."""
        return self._version

    def __eq__(self, other):
        # Return NotImplemented for foreign types instead of raising
        # AttributeError on the missing _version attribute; Python then
        # falls back to the reflected comparison or identity.
        if not isinstance(other, Namedver):
            return NotImplemented
        return self._version == other._version

    def __lt__(self, other):
        # Named versions have no defined ordering.
        raise ValueError("{0} is not comparable to {1}".format(self, other))

    def __repr__(self):
        return "Namedver({0})".format(self.version())
@total_ordering
class Semver(Version):
    """Semantic versioning.

    See http://semver.org
    """

    @staticmethod
    def parse(version):
        """Parse a 'major.minor.patch' string into a Semver; raises ValueError otherwise."""
        components = version.split(".", 3)
        if len(components) != 3:
            raise ValueError(
                "Expected a version of the form major.minor.patch: '{}'".format(version)
            )
        major, minor, patch = components

        def to_i(component):
            try:
                return int(component)
            except (TypeError, ValueError):
                raise ValueError(
                    "Invalid revision component {} in {} - "
                    "must be an integer".format(component, version)
                )

        return Semver(to_i(major), to_i(minor), to_i(patch))

    def __init__(self, major, minor, patch, snapshot=False):
        self.major = major
        self.minor = minor
        self.patch = patch
        self.snapshot = snapshot

    def bump(self):
        """Return a new Semver with the patch level incremented.

        A bump of a snapshot discards snapshot status.
        """
        return Semver(self.major, self.minor, self.patch + 1)

    def make_snapshot(self):
        """Return a copy of this version marked as a snapshot."""
        return Semver(self.major, self.minor, self.patch, snapshot=True)

    def version(self):
        return "{}.{}.{}".format(
            self.major,
            self.minor,
            ("{}-SNAPSHOT".format(self.patch)) if self.snapshot else self.patch,
        )

    def __eq__(self, other):
        return (self.major, self.minor, self.patch, self.snapshot) == (
            other.major,
            other.minor,
            other.patch,
            other.snapshot,
        )

    def __lt__(self, other):
        # BUG FIX: compare the components lexicographically.  The previous
        # implementation compared *patches* when the minors differed, and
        # never compared patches at all when major and minor were equal
        # (it fell straight through to the snapshot comparison), so e.g.
        # Semver(1, 2, 3) < Semver(1, 10, 0) evaluated to False.
        if self.major != other.major:
            return self.major < other.major
        if self.minor != other.minor:
            return self.minor < other.minor
        if self.patch != other.patch:
            return self.patch < other.patch
        # A snapshot of X.Y.Z precedes the X.Y.Z release (False < True).
        return self.snapshot < other.snapshot

    def __repr__(self):
        return "Semver({})".format(self.version())
class ScmPublishMixin:
    """A mixin for tasks that provides methods for publishing pushdbs via scm.

    Requires that the mixing task class
    * has the properties scm and log,
    * has the method get_options
    """

    class InvalidBranchError(TaskError):
        """Indicates the current branch is not an allowed branch to publish from."""

    class InvalidRemoteError(TaskError):
        """Indicates the current default remote server is not an allowed remote server to publish
        to."""

    class DirtyWorkspaceError(TaskError):
        """Indicates the current workspace is dirty and thus unsuitable for publishing from."""

    # Default number of scm push retries before aborting.
    _SCM_PUSH_ATTEMPTS = 5

    @classmethod
    def register_options(cls, register):
        # Registers scm-publishing options on top of the superclass options.
        super().register_options(register)
        register(
            "--scm-push-attempts",
            type=int,
            default=cls._SCM_PUSH_ATTEMPTS,
            help="Try pushing the pushdb to the SCM this many times before aborting.",
        )
        register(
            "--restrict-push-branches",
            advanced=True,
            type=list,
            help="Allow pushes only from one of these branches.",
        )
        register(
            "--restrict-push-urls",
            advanced=True,
            type=list,
            help="Allow pushes to only one of these urls.",
        )
        register(
            "--verify-commit",
            advanced=True,
            type=bool,
            default=True,
            help='Whether or not to "verify" commits made using SCM publishing. For git, this '
            "means running commit hooks.",
        )

    @property
    def restrict_push_branches(self):
        # Branch whitelist; falsy means any branch is allowed.
        return self.get_options().restrict_push_branches

    @property
    def restrict_push_urls(self):
        # Remote-url whitelist; falsy means any remote is allowed.
        return self.get_options().restrict_push_urls

    @property
    def scm_push_attempts(self):
        # Number of push attempts before giving up.
        return self.get_options().scm_push_attempts

    def check_clean_master(self, commit=False):
        """Perform a sanity check on SCM publishing constraints.

        Checks for uncommitted tracked files and ensures we're on an allowed branch configured to push
        to an allowed server if `commit` is `True`.

        :param bool commit: `True` if a commit is in progress.
        :raise TaskError: on failure
        """
        if commit:
            if self.restrict_push_branches:
                branch = self.scm.branch_name
                if branch not in self.restrict_push_branches:
                    raise self.InvalidBranchError(
                        "Can only push from {}, currently on branch: {}".format(
                            " ".join(sorted(self.restrict_push_branches)), branch
                        )
                    )
            if self.restrict_push_urls:
                url = self.scm.server_url
                if url not in self.restrict_push_urls:
                    raise self.InvalidRemoteError(
                        "Can only push to {}, currently the remote url is: {}".format(
                            " ".join(sorted(self.restrict_push_urls)), url
                        )
                    )
            changed_files = self.scm.changed_files()
            if changed_files:
                raise self.DirtyWorkspaceError(
                    "Can only push from a clean branch, found : {}".format(" ".join(changed_files))
                )
        elif self.scm:
            # Not committing: just note that the clean-branch check was skipped.
            self.log.info(
                "Skipping check for a clean {} branch in test mode.".format(self.scm.branch_name)
            )

    def commit_pushdb(self, coordinates, postscript=None):
        """Commit changes to the pushdb with a message containing the provided coordinates."""
        self.scm.commit(
            "pants build committing publish data for push of {coordinates}"
            "{postscript}".format(coordinates=coordinates, postscript=postscript or ""),
            verify=self.get_options().verify_commit,
        )

    def publish_pushdb_changes_to_remote_scm(
        self, pushdb_file, coordinate, tag_name, tag_message, postscript=None
    ):
        """Push pushdb changes to the remote scm repository, and then tag the commit if it
        succeeds."""
        self._add_pushdb(pushdb_file)
        self.commit_pushdb(coordinate, postscript=postscript)
        self._push_and_tag_changes(
            tag_name=tag_name,
            tag_message="{message}{postscript}".format(
                message=tag_message, postscript=postscript or ""
            ),
        )

    def _add_pushdb(self, pushdb_file):
        # Stage the pushdb file for the upcoming commit.
        self.scm.add(pushdb_file)

    def _push_and_tag_changes(self, tag_name, tag_message):
        # Tag only after the push succeeds (_push_with_retry raises on final failure).
        self._push_with_retry(self.scm, self.log, self.scm_push_attempts)
        self.scm.tag(tag_name, tag_message)

    @staticmethod
    def _push_with_retry(scm, log, attempts):
        # Push, refreshing and retrying up to `attempts` times on remote failures.
        # Uses for/else: the else clause runs only when every attempt failed.
        global_scm_exception = None
        for attempt in range(attempts):
            try:
                log.debug("Trying scm push")
                scm.push()
                break  # success
            except Scm.RemoteException as scm_exception:
                global_scm_exception = scm_exception
                log.debug("Scm push failed, trying to refresh.")
                # This might fail in the event that there is a real conflict, throwing
                # a Scm.LocalException (in case of a rebase failure) or a Scm.RemoteException
                # in the case of a fetch failure. We'll directly raise a local exception,
                # since we can't fix it by retrying, but if we do, we want to display the
                # remote exception that caused the refresh as well just in case the user cares.
                # Remote exceptions probably indicate network or configuration issues, so
                # we'll let them propagate
                try:
                    scm.refresh(leave_clean=True)
                except Scm.LocalException as local_exception:
                    exc = traceback.format_exc()
                    log.debug("SCM exception while pushing: {}".format(exc))
                    raise local_exception
        else:  # no more retries
            raise global_scm_exception
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
import traceback
from abc import abstractmethod
from functools import total_ordering
from pants.base.exceptions import TaskError
from pants.scm.scm import Scm
class Version:
    """Common base for the version schemes below (Semver, Namedver).

    Note: `@abstractmethod` here is advisory only — without an ABCMeta
    metaclass Python does not prevent instantiation.
    """

    @staticmethod
    def parse(version):
        """Attempts to parse the given string as Semver, then falls back to Namedver."""
        try:
            parsed = Semver.parse(version)
        except ValueError:
            parsed = Namedver.parse(version)
        return parsed

    @abstractmethod
    def version(self):
        """Returns the string representation of this Version."""
@total_ordering
class Namedver(Version):
    """A less restrictive versioning scheme that does not conflict with Semver.

    It's important to not allow certain characters that are used in maven for performing
    range matching like *><=! See:
    https://maven.apache.org/enforcer/enforcer-rules/versionRanges.html
    """

    # Allowed characters: dashes, underscores, dots and alphanumerics (no whitespace).
    _VALID_NAME = re.compile("^[-_.A-Za-z0-9]+$")
    # Rejects names made up purely of separators (e.g. "---" or "._").
    _INVALID_NAME = re.compile("^[-_.]*$")

    @classmethod
    def parse(cls, version):
        """Parse `version` as a named version.

        :param str version: the candidate version string.
        :returns: a new Namedver.
        :raises ValueError: if `version` contains disallowed characters, has no
          alphanumeric character, or is a valid semantic version.
        """
        # must not contain whitespace or maven range-matching characters
        if not cls._VALID_NAME.match(version):
            raise ValueError(
                "Named versions must match {}: '{}'".format(cls._VALID_NAME.pattern, version)
            )
        if cls._INVALID_NAME.match(version):
            raise ValueError("Named version must contain at least one alphanumeric character")
        # must not be valid semver
        try:
            Semver.parse(version)
        except ValueError:
            return Namedver(version)
        else:
            raise ValueError(
                "Named versions must not be valid semantic versions: '{0}'".format(version)
            )

    def __init__(self, version):
        self._version = version

    def version(self):
        """Returns the string representation of this Version."""
        return self._version

    def __eq__(self, other):
        # Bug fix: previously this raised AttributeError when compared against a
        # non-Namedver; returning NotImplemented lets Python fall back correctly.
        if not isinstance(other, Namedver):
            return NotImplemented
        return self._version == other._version

    def __lt__(self, other):
        # Named versions are deliberately unordered.
        raise ValueError("{0} is not comparable to {1}".format(self, other))

    def __repr__(self):
        return "Namedver({0})".format(self.version())
@total_ordering
class Semver(Version):
    """Semantic versioning.

    See http://semver.org
    """

    @staticmethod
    def parse(version):
        """Parse `version` as `major.minor.patch`.

        :raises ValueError: unless there are exactly three dot-separated
          integer components.
        """
        components = version.split(".", 3)
        if len(components) != 3:
            raise ValueError
        major, minor, patch = components

        def to_i(component):
            try:
                return int(component)
            except (TypeError, ValueError):
                raise ValueError(
                    "Invalid revision component {} in {} - "
                    "must be an integer".format(component, version)
                )

        return Semver(to_i(major), to_i(minor), to_i(patch))

    def __init__(self, major, minor, patch, snapshot=False):
        self.major = major
        self.minor = minor
        self.patch = patch
        self.snapshot = snapshot

    def bump(self):
        # A bump of a snapshot discards snapshot status.
        return Semver(self.major, self.minor, self.patch + 1)

    def make_snapshot(self):
        return Semver(self.major, self.minor, self.patch, snapshot=True)

    def version(self):
        return "{}.{}.{}".format(
            self.major,
            self.minor,
            ("{}-SNAPSHOT".format(self.patch)) if self.snapshot else self.patch,
        )

    def __eq__(self, other):
        return (self.major, self.minor, self.patch, self.snapshot) == (
            other.major,
            other.minor,
            other.patch,
            other.snapshot,
        )

    def __lt__(self, other):
        # Bug fix: the previous implementation compared `patch` when the *minor*
        # components differed, and skipped the patch comparison entirely when
        # minors were equal. Lexicographic tuple comparison yields the correct
        # (major, minor, patch, snapshot) ordering.
        return (self.major, self.minor, self.patch, self.snapshot) < (
            other.major,
            other.minor,
            other.patch,
            other.snapshot,
        )

    def __repr__(self):
        return "Semver({})".format(self.version())
class ScmPublishMixin:
    """A mixin for tasks that provides methods for publishing pushdbs via scm.

    Requires that the mixing task class
    * has the properties scm and log,
    * has the method get_options
    """

    class InvalidBranchError(TaskError):
        """Indicates the current branch is not an allowed branch to publish from."""

    class InvalidRemoteError(TaskError):
        """Indicates the current default remote server is not an allowed remote server to publish
        to."""

    class DirtyWorkspaceError(TaskError):
        """Indicates the current workspace is dirty and thus unsuitable for publishing from."""

    # Default number of scm push retries before aborting.
    _SCM_PUSH_ATTEMPTS = 5

    @classmethod
    def register_options(cls, register):
        # Registers scm-publishing options on top of the superclass options.
        super().register_options(register)
        register(
            "--scm-push-attempts",
            type=int,
            default=cls._SCM_PUSH_ATTEMPTS,
            help="Try pushing the pushdb to the SCM this many times before aborting.",
        )
        register(
            "--restrict-push-branches",
            advanced=True,
            type=list,
            help="Allow pushes only from one of these branches.",
        )
        register(
            "--restrict-push-urls",
            advanced=True,
            type=list,
            help="Allow pushes to only one of these urls.",
        )
        register(
            "--verify-commit",
            advanced=True,
            type=bool,
            default=True,
            help='Whether or not to "verify" commits made using SCM publishing. For git, this '
            "means running commit hooks.",
        )

    @property
    def restrict_push_branches(self):
        # Branch whitelist; falsy means any branch is allowed.
        return self.get_options().restrict_push_branches

    @property
    def restrict_push_urls(self):
        # Remote-url whitelist; falsy means any remote is allowed.
        return self.get_options().restrict_push_urls

    @property
    def scm_push_attempts(self):
        # Number of push attempts before giving up.
        return self.get_options().scm_push_attempts

    def check_clean_master(self, commit=False):
        """Perform a sanity check on SCM publishing constraints.

        Checks for uncommitted tracked files and ensures we're on an allowed branch configured to push
        to an allowed server if `commit` is `True`.

        :param bool commit: `True` if a commit is in progress.
        :raise TaskError: on failure
        """
        if commit:
            if self.restrict_push_branches:
                branch = self.scm.branch_name
                if branch not in self.restrict_push_branches:
                    raise self.InvalidBranchError(
                        "Can only push from {}, currently on branch: {}".format(
                            " ".join(sorted(self.restrict_push_branches)), branch
                        )
                    )
            if self.restrict_push_urls:
                url = self.scm.server_url
                if url not in self.restrict_push_urls:
                    raise self.InvalidRemoteError(
                        "Can only push to {}, currently the remote url is: {}".format(
                            " ".join(sorted(self.restrict_push_urls)), url
                        )
                    )
            changed_files = self.scm.changed_files()
            if changed_files:
                raise self.DirtyWorkspaceError(
                    "Can only push from a clean branch, found : {}".format(" ".join(changed_files))
                )
        elif self.scm:
            # Not committing: just note that the clean-branch check was skipped.
            self.log.info(
                "Skipping check for a clean {} branch in test mode.".format(self.scm.branch_name)
            )

    def commit_pushdb(self, coordinates, postscript=None):
        """Commit changes to the pushdb with a message containing the provided coordinates."""
        self.scm.commit(
            "pants build committing publish data for push of {coordinates}"
            "{postscript}".format(coordinates=coordinates, postscript=postscript or ""),
            verify=self.get_options().verify_commit,
        )

    def publish_pushdb_changes_to_remote_scm(
        self, pushdb_file, coordinate, tag_name, tag_message, postscript=None
    ):
        """Push pushdb changes to the remote scm repository, and then tag the commit if it
        succeeds."""
        self._add_pushdb(pushdb_file)
        self.commit_pushdb(coordinate, postscript=postscript)
        self._push_and_tag_changes(
            tag_name=tag_name,
            tag_message="{message}{postscript}".format(
                message=tag_message, postscript=postscript or ""
            ),
        )

    def _add_pushdb(self, pushdb_file):
        # Stage the pushdb file for the upcoming commit.
        self.scm.add(pushdb_file)

    def _push_and_tag_changes(self, tag_name, tag_message):
        # Tag only after the push succeeds (_push_with_retry raises on final failure).
        self._push_with_retry(self.scm, self.log, self.scm_push_attempts)
        self.scm.tag(tag_name, tag_message)

    @staticmethod
    def _push_with_retry(scm, log, attempts):
        # Push, refreshing and retrying up to `attempts` times on remote failures.
        # Uses for/else: the else clause runs only when every attempt failed.
        global_scm_exception = None
        for attempt in range(attempts):
            try:
                log.debug("Trying scm push")
                scm.push()
                break  # success
            except Scm.RemoteException as scm_exception:
                global_scm_exception = scm_exception
                log.debug("Scm push failed, trying to refresh.")
                # This might fail in the event that there is a real conflict, throwing
                # a Scm.LocalException (in case of a rebase failure) or a Scm.RemoteException
                # in the case of a fetch failure. We'll directly raise a local exception,
                # since we can't fix it by retrying, but if we do, we want to display the
                # remote exception that caused the refresh as well just in case the user cares.
                # Remote exceptions probably indicate network or configuration issues, so
                # we'll let them propagate
                try:
                    scm.refresh(leave_clean=True)
                except Scm.LocalException as local_exception:
                    exc = traceback.format_exc()
                    log.debug("SCM exception while pushing: {}".format(exc))
                    raise local_exception
        else:  # no more retries
            raise global_scm_exception
|
en
| 0.841756
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). Attempts to parse the given string as Semver, then falls back to Namedver. Returns the string representation of this Version. A less restrictive versioning scheme that does not conflict with Semver. Its important to not allow certain characters that are used in maven for performing range matching like *><=! See: https://maven.apache.org/enforcer/enforcer-rules/versionRanges.html # must not contain whitespace # must not be valid semver Semantic versioning. See http://semver.org # A bump of a snapshot discards snapshot status A mixin for tasks that provides methods for publishing pushdbs via scm. Requires that the mixing task class * has the properties scm and log, * has the method get_options Indicates the current branch is not an allowed branch to publish from. Indicates the current default remote server is not an allowed remote server to publish to. Indicates the current workspace is dirty and thus unsuitable for publishing from. Perform a sanity check on SCM publishing constraints. Checks for uncommitted tracked files and ensures we're on an allowed branch configured to push to an allowed server if `commit` is `True`. :param bool commit: `True` if a commit is in progress. :raise TaskError: on failure Commit changes to the pushdb with a message containing the provided coordinates. Push pushdb changes to the remote scm repository, and then tag the commit if it succeeds. # success # This might fail in the event that there is a real conflict, throwing # a Scm.LocalException (in case of a rebase failure) or a Scm.RemoteException # in the case of a fetch failure. We'll directly raise a local exception, # since we can't fix it by retrying, but if we do, we want to display the # remote exception that caused the refresh as well just in case the user cares. 
# Remote exceptions probably indicate network or configuration issues, so # we'll let them propagate # no more retries
| 2.186189
| 2
|
tests/forumsentry/test_errors.py
|
awalker125/forumsentry-sdk-for-python
| 2
|
6625647
|
<filename>tests/forumsentry/test_errors.py
'''
Created on 8 Dec 2017
@author: walandre
'''
import unittest
from forumsentry.errors import SerializationError
from forumsentry.errors import DeSerializationError
from forumsentry.errors import BadVerbError
from forumsentry.errors import ConfigError
from forumsentry.errors import NotSupportedError
from forumsentry.errors import InvalidTypeError
class TestErrors(unittest.TestCase):
    """Verifies every forumsentry error type supplies its own __str__."""

    def setUp(self):
        # One instance of each custom error, all constructed with the same payload.
        self._BadVerbError = BadVerbError('test')
        self._SerializationError = SerializationError('test')
        self._DeSerializationError = DeSerializationError('test')
        self._ConfigError = ConfigError('test')
        self._NotSupportedError = NotSupportedError('test')
        self._InvalidTypeError = InvalidTypeError('test')

    def _assert_custom_str(self, error):
        # The type must override object.__str__ and the text must say 'invalid'.
        self.assertTrue(error.__str__ is not object.__str__)
        self.assertIn('invalid', error.__str__())

    def test_NotSupportedError_implements_str(self):
        self._assert_custom_str(self._NotSupportedError)

    def test_InvalidTypeError_implements_str(self):
        self._assert_custom_str(self._InvalidTypeError)

    def test_ConfigError_implements_str(self):
        self._assert_custom_str(self._ConfigError)

    def test_BadVerbError_implements_str(self):
        self._assert_custom_str(self._BadVerbError)

    def test_SerializationError_implements_str(self):
        self._assert_custom_str(self._SerializationError)

    def test_DeSerializationError_implements_str(self):
        self._assert_custom_str(self._DeSerializationError)
if __name__ == "__main__":
    # Allow running this module directly with the default unittest runner.
    #import sys;sys.argv = ['', 'Test.testErrors']
    unittest.main()
|
<filename>tests/forumsentry/test_errors.py
'''
Created on 8 Dec 2017
@author: walandre
'''
import unittest
from forumsentry.errors import SerializationError
from forumsentry.errors import DeSerializationError
from forumsentry.errors import BadVerbError
from forumsentry.errors import ConfigError
from forumsentry.errors import NotSupportedError
from forumsentry.errors import InvalidTypeError
class TestErrors(unittest.TestCase):
    """Verifies every forumsentry error type supplies its own __str__."""

    def setUp(self):
        # One instance of each custom error, all constructed with the same payload.
        self._BadVerbError = BadVerbError('test')
        self._SerializationError = SerializationError('test')
        self._DeSerializationError = DeSerializationError('test')
        self._ConfigError = ConfigError('test')
        self._NotSupportedError = NotSupportedError('test')
        self._InvalidTypeError = InvalidTypeError('test')

    def _assert_custom_str(self, error):
        # The type must override object.__str__ and the text must say 'invalid'.
        self.assertTrue(error.__str__ is not object.__str__)
        self.assertIn('invalid', error.__str__())

    def test_NotSupportedError_implements_str(self):
        self._assert_custom_str(self._NotSupportedError)

    def test_InvalidTypeError_implements_str(self):
        self._assert_custom_str(self._InvalidTypeError)

    def test_ConfigError_implements_str(self):
        self._assert_custom_str(self._ConfigError)

    def test_BadVerbError_implements_str(self):
        self._assert_custom_str(self._BadVerbError)

    def test_SerializationError_implements_str(self):
        self._assert_custom_str(self._SerializationError)

    def test_DeSerializationError_implements_str(self):
        self._assert_custom_str(self._DeSerializationError)
if __name__ == "__main__":
    # Allow running this module directly with the default unittest runner.
    #import sys;sys.argv = ['', 'Test.testErrors']
    unittest.main()
|
en
| 0.541066
|
Created on 8 Dec 2017
@author: walandre #import sys;sys.argv = ['', 'Test.testErrors']
| 2.510822
| 3
|
tasks/views.py
|
bangalorebyte-cohort29-1911/SampleCRM
| 0
|
6625648
|
from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, render, reverse
from django.views.generic import (CreateView, DeleteView, DetailView, FormView,
TemplateView, UpdateView, View)
# from accounts.models import Account
from common.access_decorators_mixins import (MarketingAccessRequiredMixin,
SalesAccessRequiredMixin, marketing_access_required, sales_access_required)
from common.models import Attachments, Comment, User
from common.tasks import send_email_user_mentions
from contacts.models import Contact
from tasks.celery_tasks import send_email
from tasks.forms import TaskAttachmentForm, TaskCommentForm, TaskForm
from tasks.models import Task
from tasks.utils import *
from teams.models import Teams
@login_required
@sales_access_required
def tasks_list(request):
    """List tasks, optionally filtered via POST by title/status/priority.

    Admins and superusers see every task; other users see only tasks they
    created or are assigned to. The GET and POST branches now apply the same
    visibility rule — previously the POST (filter) branch dropped tasks the
    user was merely assigned to, hiding them from filtered views.
    """
    if request.method == 'GET':
        if request.user.role == 'ADMIN' or request.user.is_superuser:
            tasks = Task.objects.all().distinct()
        else:
            tasks = Task.objects.filter(
                Q(created_by=request.user) | Q(assigned_to=request.user)).distinct()
        today = datetime.today().date()
        return render(request, 'tasks_tasks_list.html', {'tasks': tasks, 'today': today, 'status_choices': STATUS_CHOICES, 'priority_choices': PRIORITY_CHOICES})

    if request.method == 'POST':
        if request.user.role == 'ADMIN' or request.user.is_superuser:
            tasks = Task.objects.all()
        else:
            # Consistency fix: include assigned-to tasks, matching the GET branch.
            tasks = Task.objects.filter(
                Q(created_by=request.user) | Q(assigned_to=request.user))
        if request.POST.get('task_title', None):
            tasks = tasks.filter(
                title__icontains=request.POST.get('task_title'))
        if request.POST.get('status', None):
            tasks = tasks.filter(status=request.POST.get('status'))
        if request.POST.get('priority', None):
            tasks = tasks.filter(priority=request.POST.get('priority'))
        tasks = tasks.distinct()
        today = datetime.today().date()
        return render(request, 'tasks_tasks_list.html', {'tasks': tasks, 'today': today, 'status_choices': STATUS_CHOICES, 'priority_choices': PRIORITY_CHOICES})
@login_required
@sales_access_required
def task_create(request):
    """Render the task creation form (GET) or create a task from POST data.

    On success, notifies assignees by email asynchronously and returns JSON
    with the redirect url; on validation failure, returns form errors as JSON.
    """
    if request.method == 'GET':
        if request.user.role == 'ADMIN' or request.user.is_superuser:
            users = User.objects.filter(is_active=True).order_by('email')
            accounts = None
        # elif request.user.google.all():
        #     users = []
        #     accounts = Account.objects.filter(created_by=request.user).filter(status="open")
        else:
            # Non-admin users may only assign tasks to admins.
            users = User.objects.filter(role='ADMIN').order_by('email')
            accounts = None
        form = TaskForm(request_user=request.user)
        return render(request, 'task_create.html', {'form': form, 'users': users, 'accounts':accounts,
                      "teams": Teams.objects.all(),
                      })

    if request.method == 'POST':
        form = TaskForm(request.POST, request_user=request.user)
        if form.is_valid():
            task = form.save(commit=False)
            task.created_by = request.user
            task.save()
            task.assigned_to.add(*request.POST.getlist('assigned_to'))
            task.contacts.add(*request.POST.getlist('contacts'))
            if request.POST.getlist('teams', []):
                # Expand the selected teams into individual assignees, skipping
                # users that were already assigned directly.
                user_ids = Teams.objects.filter(id__in=request.POST.getlist('teams')).values_list('users', flat=True)
                assinged_to_users_ids = task.assigned_to.all().values_list('id', flat=True)
                for user_id in user_ids:
                    if user_id not in assinged_to_users_ids:
                        task.assigned_to.add(user_id)
            if request.POST.getlist('teams', []):
                task.teams.add(*request.POST.getlist('teams'))
            # Notify every assignee by email (async via celery).
            kwargs = {'domain': request.get_host(), 'protocol': request.scheme}
            assigned_to_list = list(task.assigned_to.all().values_list('id', flat=True))
            send_email.delay(task.id, assigned_to_list, **kwargs)
            success_url = reverse('tasks:tasks_list')
            if request.POST.get('from_account'):
                # Created from an account detail page: return there instead.
                success_url = reverse('accounts:view_account', args=(request.POST.get('from_account'),))
            return JsonResponse({'error': False, 'success_url': success_url})
        else:
            return JsonResponse({'error': True, 'errors': form.errors})
@login_required
@sales_access_required
def task_detail(request, task_id):
    """Show a task's detail page with its attachments and comments.

    Access is limited to admins/superusers, the task's creator, its assignees,
    or users assigned to the task's account.
    """
    task = get_object_or_404(Task, pk=task_id)
    user_assigned_account = False
    user_assigned_accounts = set(request.user.account_assigned_users.values_list('id', flat=True))
    if task.account:
        task_accounts = set([task.account.id])
    else:
        task_accounts = set()
    if user_assigned_accounts.intersection(task_accounts):
        user_assigned_account = True

    if not ((request.user.role == 'ADMIN') or
            (request.user.is_superuser) or
            (task.created_by == request.user) or
            (request.user in task.assigned_to.all()) or
            user_assigned_account):
        raise PermissionDenied

    if request.method == 'GET':
        # if Task.objects.filter(id=task_id).exists():
        #     task = Task.objects.select_related('account').prefetch_related(
        #         'assigned_to', 'contacts').get(id=task_id)
        attachments = task.tasks_attachment.all()
        comments = task.tasks_comments.all()
        # Build the @mention candidate list shown in the comment box.
        if request.user.is_superuser or request.user.role == 'ADMIN':
            users_mention = list(User.objects.filter(is_active=True).values('username'))
        elif request.user != task.created_by:
            users_mention = [{'username': task.created_by.username}]
        else:
            users_mention = list(task.assigned_to.all().values('username'))
        return render(request, 'task_detail.html',
                      {'task': task, 'users_mention': users_mention,
                       'attachments': attachments, 'comments': comments})
@login_required
@sales_access_required
def task_edit(request, task_id):
    """Render the edit form (GET) or update an existing task (POST).

    Only admins, superusers or the task's creator may edit. After a successful
    update, only users *newly* added as assignees are emailed.
    """
    task_obj = get_object_or_404(Task, pk=task_id)
    accounts = None
    if not (request.user.role == 'ADMIN' or request.user.is_superuser or task_obj.created_by == request.user):
        raise PermissionDenied

    if request.method == 'GET':
        if request.user.role == 'ADMIN' or request.user.is_superuser:
            users = User.objects.filter(is_active=True).order_by('email')
        elif request.user.google.all():
            users = []
        else:
            users = User.objects.filter(role='ADMIN').order_by('email')
        # form = TaskForm(request_user=request.user)
        form = TaskForm(instance=task_obj, request_user=request.user)
        return render(request, 'task_create.html', {'form': form, 'task_obj': task_obj,
                      'users': users, 'accounts':accounts, "teams": Teams.objects.all(),})

    if request.method == 'POST':
        form = TaskForm(request.POST, instance=task_obj,
                        request_user=request.user)
        if form.is_valid():
            task = form.save(commit=False)
            # Capture the current assignees before saving so only the users
            # added by this edit are notified below.
            previous_assigned_to_users = list(task_obj.assigned_to.all().values_list('id', flat=True))
            task.save()
            form.save_m2m()
            # task.assigned_to.clear()
            # task.contacts.clear()
            # task.assigned_to.add(*request.POST.getlist('assigned_to'))
            # task.contacts.add(*request.POST.getlist('contacts'))
            if request.POST.getlist('teams', []):
                # Expand the selected teams into individual assignees, skipping
                # users that are already assigned.
                user_ids = Teams.objects.filter(id__in=request.POST.getlist('teams')).values_list('users', flat=True)
                assinged_to_users_ids = task.assigned_to.all().values_list('id', flat=True)
                for user_id in user_ids:
                    if user_id not in assinged_to_users_ids:
                        task.assigned_to.add(user_id)

            if request.POST.getlist('teams', []):
                task.teams.clear()
                task.teams.add(*request.POST.getlist('teams'))
            else:
                task.teams.clear()
            # Email only the newly added assignees (async via celery).
            kwargs = {'domain': request.get_host(), 'protocol': request.scheme}
            assigned_to_list = list(task.assigned_to.all().values_list('id', flat=True))
            recipients = list(set(assigned_to_list) - set(previous_assigned_to_users))
            send_email.delay(task.id, recipients, **kwargs)
            success_url = reverse('tasks:tasks_list')
            if request.POST.get('from_account'):
                # Edited from an account detail page: return there instead.
                success_url = reverse('accounts:view_account', args=(request.POST.get('from_account'),))
            return JsonResponse({'error': False, 'success_url': success_url})
        else:
            return JsonResponse({'error': True, 'errors': form.errors})
@login_required
@sales_access_required
def task_delete(request, task_id):
    """Delete a task; only admins, superusers or the creator may do so.

    NOTE(review): the deletion is triggered by a GET request, which is unsafe
    for a destructive action (link prefetchers/crawlers can fire it) — confirm
    whether callers can be migrated to POST.
    """
    task_obj = get_object_or_404(Task, pk=task_id)
    if not (request.user.role == 'ADMIN' or request.user.is_superuser or task_obj.created_by == request.user):
        raise PermissionDenied

    if request.method == 'GET':
        task_obj.delete()
        if request.GET.get('view_account', None):
            # Return to the account page the delete was initiated from.
            return redirect(reverse('accounts:view_account', args=(request.GET.get('view_account'),)))
        return redirect('tasks:tasks_list')
class AddCommentView(LoginRequiredMixin, CreateView):
    """Adds a comment to a task and notifies @mentioned users.

    Commenting is allowed for the task's creator, superusers and admins;
    all responses are JSON for the in-page comment widget.
    """
    model = Comment
    form_class = TaskCommentForm
    http_method_names = ["post"]

    def post(self, request, *args, **kwargs):
        self.object = None
        self.task = get_object_or_404(
            Task, id=request.POST.get('task_id'))
        if (
            request.user == self.task.created_by or request.user.is_superuser or
            request.user.role == 'ADMIN'
        ):
            form = self.get_form()
            if form.is_valid():
                return self.form_valid(form)
            return self.form_invalid(form)
        data = {
            'error': "You don't have permission to comment for this account."}
        return JsonResponse(data)

    def form_valid(self, form):
        """Save the comment and kick off async @mention notifications."""
        comment = form.save(commit=False)
        comment.commented_by = self.request.user
        comment.task = self.task
        comment.save()
        comment_id = comment.id
        current_site = get_current_site(self.request)
        send_email_user_mentions.delay(comment_id, 'tasks', domain=current_site.domain,
                                       protocol=self.request.scheme)
        return JsonResponse({
            "comment_id": comment.id, "comment": comment.comment,
            "commented_on": comment.commented_on,
            "commented_on_arrow": comment.commented_on_arrow,
            "commented_by": comment.commented_by.email
        })

    def form_invalid(self, form):
        # Surface validation errors for the comment field only.
        return JsonResponse({"error": form['comment'].errors})
class UpdateCommentView(LoginRequiredMixin, View):
    """Edits an existing comment; only its author may update it."""
    http_method_names = ["post"]

    def post(self, request, *args, **kwargs):
        self.comment_obj = get_object_or_404(
            Comment, id=request.POST.get("commentid"))
        if request.user == self.comment_obj.commented_by:
            form = TaskCommentForm(request.POST, instance=self.comment_obj)
            if form.is_valid():
                return self.form_valid(form)
            return self.form_invalid(form)
        data = {'error': "You don't have permission to edit this comment."}
        return JsonResponse(data)

    def form_valid(self, form):
        """Persist the edited text and re-send async @mention notifications."""
        self.comment_obj.comment = form.cleaned_data.get("comment")
        self.comment_obj.save(update_fields=["comment"])
        comment_id = self.comment_obj.id
        current_site = get_current_site(self.request)
        send_email_user_mentions.delay(comment_id, 'tasks', domain=current_site.domain,
                                       protocol=self.request.scheme)
        return JsonResponse({
            "comment_id": self.comment_obj.id,
            "comment": self.comment_obj.comment,
        })

    def form_invalid(self, form):
        # Surface validation errors for the comment field only.
        return JsonResponse({"error": form['comment'].errors})
class DeleteCommentView(LoginRequiredMixin, View):
    """Deletes a comment; permitted only for the user who wrote it."""

    def post(self, request, *args, **kwargs):
        comment = get_object_or_404(
            Comment, id=request.POST.get("comment_id"))
        self.object = comment
        # Guard clause: anyone other than the author is rejected.
        if request.user != comment.commented_by:
            return JsonResponse(
                {'error': "You don't have permission to delete this comment."})
        comment.delete()
        return JsonResponse({"cid": request.POST.get("comment_id")})
class AddAttachmentView(LoginRequiredMixin, CreateView):
    """Attaches a file to a task.

    Allowed for the task's creator, superusers and admins; responses are JSON
    for the in-page attachment widget.
    """
    model = Attachments
    form_class = TaskAttachmentForm
    http_method_names = ["post"]

    def post(self, request, *args, **kwargs):
        self.object = None
        self.task = get_object_or_404(
            Task, id=request.POST.get('task_id'))
        if (
            request.user == self.task.created_by or
            request.user.is_superuser or
            request.user.role == 'ADMIN'
        ):
            form = self.get_form()
            if form.is_valid():
                return self.form_valid(form)
            return self.form_invalid(form)
        data = {
            'error': "You don't have permission to add attachment \
            for this account."}
        return JsonResponse(data)

    def form_valid(self, form):
        """Save the attachment and return its metadata for display."""
        attachment = form.save(commit=False)
        attachment.created_by = self.request.user
        attachment.file_name = attachment.attachment.name
        attachment.task = self.task
        attachment.save()
        return JsonResponse({
            "attachment_id": attachment.id,
            "attachment": attachment.file_name,
            "attachment_url": attachment.attachment.url,
            "download_url": reverse('common:download_attachment',
                                    kwargs={'pk': attachment.id}),
            "attachment_display": attachment.get_file_type_display(),
            "created_on": attachment.created_on,
            "created_on_arrow": attachment.created_on_arrow,
            "created_by": attachment.created_by.email,
            "file_type": attachment.file_type()
        })

    def form_invalid(self, form):
        # Surface validation errors for the attachment field only.
        return JsonResponse({"error": form['attachment'].errors})
class DeleteAttachmentsView(LoginRequiredMixin, View):
    """Deletes an attachment; allowed for its creator, superusers and admins."""

    def post(self, request, *args, **kwargs):
        attachment = get_object_or_404(
            Attachments, id=request.POST.get("attachment_id"))
        self.object = attachment
        allowed = (
            request.user == attachment.created_by
            or request.user.is_superuser
            or request.user.role == 'ADMIN'
        )
        # Guard clause: reject anyone without delete rights.
        if not allowed:
            return JsonResponse(
                {'error': "You don't have permission to delete this attachment."})
        attachment.delete()
        return JsonResponse({"acd": request.POST.get("attachment_id")})
|
from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.sites.shortcuts import get_current_site
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, render, reverse
from django.views.generic import (CreateView, DeleteView, DetailView, FormView,
TemplateView, UpdateView, View)
# from accounts.models import Account
from common.access_decorators_mixins import (MarketingAccessRequiredMixin,
SalesAccessRequiredMixin, marketing_access_required, sales_access_required)
from common.models import Attachments, Comment, User
from common.tasks import send_email_user_mentions
from contacts.models import Contact
from tasks.celery_tasks import send_email
from tasks.forms import TaskAttachmentForm, TaskCommentForm, TaskForm
from tasks.models import Task
from tasks.utils import *
from teams.models import Teams
@login_required
@sales_access_required
def tasks_list(request):
if request.method == 'GET':
if request.user.role == 'ADMIN' or request.user.is_superuser:
tasks = Task.objects.all().distinct()
else:
tasks = Task.objects.filter(
Q(created_by=request.user) | Q(assigned_to=request.user)).distinct()
today = datetime.today().date()
return render(request, 'tasks_tasks_list.html', {'tasks': tasks, 'today': today, 'status_choices': STATUS_CHOICES, 'priority_choices': PRIORITY_CHOICES})
if request.method == 'POST':
tasks = Task.objects.filter()
if request.user.role == 'ADMIN' or request.user.is_superuser:
tasks = tasks
else:
tasks = Task.objects.filter(created_by=request.user)
if request.POST.get('task_title', None):
tasks = tasks.filter(
title__icontains=request.POST.get('task_title'))
if request.POST.get('status', None):
tasks = tasks.filter(status=request.POST.get('status'))
if request.POST.get('priority', None):
tasks = tasks.filter(priority=request.POST.get('priority'))
tasks = tasks.distinct()
today = datetime.today().date()
return render(request, 'tasks_tasks_list.html', {'tasks': tasks, 'today': today, 'status_choices': STATUS_CHOICES, 'priority_choices': PRIORITY_CHOICES})
@login_required
@sales_access_required
def task_create(request):
if request.method == 'GET':
if request.user.role == 'ADMIN' or request.user.is_superuser:
users = User.objects.filter(is_active=True).order_by('email')
accounts = None
# elif request.user.google.all():
# users = []
# accounts = Account.objects.filter(created_by=request.user).filter(status="open")
else:
users = User.objects.filter(role='ADMIN').order_by('email')
accounts = None
form = TaskForm(request_user=request.user)
return render(request, 'task_create.html', {'form': form, 'users': users, 'accounts':accounts,
"teams": Teams.objects.all(),
})
if request.method == 'POST':
form = TaskForm(request.POST, request_user=request.user)
if form.is_valid():
task = form.save(commit=False)
task.created_by = request.user
task.save()
task.assigned_to.add(*request.POST.getlist('assigned_to'))
task.contacts.add(*request.POST.getlist('contacts'))
if request.POST.getlist('teams', []):
user_ids = Teams.objects.filter(id__in=request.POST.getlist('teams')).values_list('users', flat=True)
assinged_to_users_ids = task.assigned_to.all().values_list('id', flat=True)
for user_id in user_ids:
if user_id not in assinged_to_users_ids:
task.assigned_to.add(user_id)
if request.POST.getlist('teams', []):
task.teams.add(*request.POST.getlist('teams'))
kwargs = {'domain': request.get_host(), 'protocol': request.scheme}
assigned_to_list = list(task.assigned_to.all().values_list('id', flat=True))
send_email.delay(task.id, assigned_to_list, **kwargs)
success_url = reverse('tasks:tasks_list')
if request.POST.get('from_account'):
success_url = reverse('accounts:view_account', args=(request.POST.get('from_account'),))
return JsonResponse({'error': False, 'success_url': success_url})
else:
return JsonResponse({'error': True, 'errors': form.errors})
@login_required
@sales_access_required
def task_detail(request, task_id):
task = get_object_or_404(Task, pk=task_id)
user_assigned_account = False
user_assigned_accounts = set(request.user.account_assigned_users.values_list('id', flat=True))
if task.account:
task_accounts = set([task.account.id])
else:
task_accounts = set()
if user_assigned_accounts.intersection(task_accounts):
user_assigned_account = True
if not ((request.user.role == 'ADMIN') or
(request.user.is_superuser) or
(task.created_by == request.user) or
(request.user in task.assigned_to.all()) or
user_assigned_account):
raise PermissionDenied
if request.method == 'GET':
# if Task.objects.filter(id=task_id).exists():
# task = Task.objects.select_related('account').prefetch_related(
# 'assigned_to', 'contacts').get(id=task_id)
attachments = task.tasks_attachment.all()
comments = task.tasks_comments.all()
if request.user.is_superuser or request.user.role == 'ADMIN':
users_mention = list(User.objects.filter(is_active=True).values('username'))
elif request.user != task.created_by:
users_mention = [{'username': task.created_by.username}]
else:
users_mention = list(task.assigned_to.all().values('username'))
return render(request, 'task_detail.html',
{'task': task, 'users_mention': users_mention,
'attachments': attachments, 'comments': comments})
@login_required
@sales_access_required
def task_edit(request, task_id):
task_obj = get_object_or_404(Task, pk=task_id)
accounts = None
if not (request.user.role == 'ADMIN' or request.user.is_superuser or task_obj.created_by == request.user):
raise PermissionDenied
if request.method == 'GET':
if request.user.role == 'ADMIN' or request.user.is_superuser:
users = User.objects.filter(is_active=True).order_by('email')
elif request.user.google.all():
users = []
else:
users = User.objects.filter(role='ADMIN').order_by('email')
# form = TaskForm(request_user=request.user)
form = TaskForm(instance=task_obj, request_user=request.user)
return render(request, 'task_create.html', {'form': form, 'task_obj': task_obj,
'users': users, 'accounts':accounts, "teams": Teams.objects.all(),})
if request.method == 'POST':
form = TaskForm(request.POST, instance=task_obj,
request_user=request.user)
if form.is_valid():
task = form.save(commit=False)
previous_assigned_to_users = list(task_obj.assigned_to.all().values_list('id', flat=True))
task.save()
form.save_m2m()
# task.assigned_to.clear()
# task.contacts.clear()
# task.assigned_to.add(*request.POST.getlist('assigned_to'))
# task.contacts.add(*request.POST.getlist('contacts'))
if request.POST.getlist('teams', []):
user_ids = Teams.objects.filter(id__in=request.POST.getlist('teams')).values_list('users', flat=True)
assinged_to_users_ids = task.assigned_to.all().values_list('id', flat=True)
for user_id in user_ids:
if user_id not in assinged_to_users_ids:
task.assigned_to.add(user_id)
if request.POST.getlist('teams', []):
task.teams.clear()
task.teams.add(*request.POST.getlist('teams'))
else:
task.teams.clear()
kwargs = {'domain': request.get_host(), 'protocol': request.scheme}
assigned_to_list = list(task.assigned_to.all().values_list('id', flat=True))
recipients = list(set(assigned_to_list) - set(previous_assigned_to_users))
send_email.delay(task.id, recipients, **kwargs)
success_url = reverse('tasks:tasks_list')
if request.POST.get('from_account'):
success_url = reverse('accounts:view_account', args=(request.POST.get('from_account'),))
return JsonResponse({'error': False, 'success_url': success_url})
else:
return JsonResponse({'error': True, 'errors': form.errors})
@login_required
@sales_access_required
def task_delete(request, task_id):
task_obj = get_object_or_404(Task, pk=task_id)
if not (request.user.role == 'ADMIN' or request.user.is_superuser or task_obj.created_by == request.user):
raise PermissionDenied
if request.method == 'GET':
task_obj.delete()
if request.GET.get('view_account', None):
return redirect(reverse('accounts:view_account', args=(request.GET.get('view_account'),)))
return redirect('tasks:tasks_list')
class AddCommentView(LoginRequiredMixin, CreateView):
model = Comment
form_class = TaskCommentForm
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
self.object = None
self.task = get_object_or_404(
Task, id=request.POST.get('task_id'))
if (
request.user == self.task.created_by or request.user.is_superuser or
request.user.role == 'ADMIN'
):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
data = {
'error': "You don't have permission to comment for this account."}
return JsonResponse(data)
def form_valid(self, form):
comment = form.save(commit=False)
comment.commented_by = self.request.user
comment.task = self.task
comment.save()
comment_id = comment.id
current_site = get_current_site(self.request)
send_email_user_mentions.delay(comment_id, 'tasks', domain=current_site.domain,
protocol=self.request.scheme)
return JsonResponse({
"comment_id": comment.id, "comment": comment.comment,
"commented_on": comment.commented_on,
"commented_on_arrow": comment.commented_on_arrow,
"commented_by": comment.commented_by.email
})
def form_invalid(self, form):
return JsonResponse({"error": form['comment'].errors})
class UpdateCommentView(LoginRequiredMixin, View):
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
self.comment_obj = get_object_or_404(
Comment, id=request.POST.get("commentid"))
if request.user == self.comment_obj.commented_by:
form = TaskCommentForm(request.POST, instance=self.comment_obj)
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
data = {'error': "You don't have permission to edit this comment."}
return JsonResponse(data)
def form_valid(self, form):
self.comment_obj.comment = form.cleaned_data.get("comment")
self.comment_obj.save(update_fields=["comment"])
comment_id = self.comment_obj.id
current_site = get_current_site(self.request)
send_email_user_mentions.delay(comment_id, 'tasks', domain=current_site.domain,
protocol=self.request.scheme)
return JsonResponse({
"comment_id": self.comment_obj.id,
"comment": self.comment_obj.comment,
})
def form_invalid(self, form):
return JsonResponse({"error": form['comment'].errors})
class DeleteCommentView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
self.object = get_object_or_404(
Comment, id=request.POST.get("comment_id"))
if request.user == self.object.commented_by:
self.object.delete()
data = {"cid": request.POST.get("comment_id")}
return JsonResponse(data)
data = {'error': "You don't have permission to delete this comment."}
return JsonResponse(data)
class AddAttachmentView(LoginRequiredMixin, CreateView):
model = Attachments
form_class = TaskAttachmentForm
http_method_names = ["post"]
def post(self, request, *args, **kwargs):
self.object = None
self.task = get_object_or_404(
Task, id=request.POST.get('task_id'))
if (
request.user == self.task.created_by or
request.user.is_superuser or
request.user.role == 'ADMIN'
):
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
return self.form_invalid(form)
data = {
'error': "You don't have permission to add attachment \
for this account."}
return JsonResponse(data)
def form_valid(self, form):
attachment = form.save(commit=False)
attachment.created_by = self.request.user
attachment.file_name = attachment.attachment.name
attachment.task = self.task
attachment.save()
return JsonResponse({
"attachment_id": attachment.id,
"attachment": attachment.file_name,
"attachment_url": attachment.attachment.url,
"download_url": reverse('common:download_attachment',
kwargs={'pk': attachment.id}),
"attachment_display": attachment.get_file_type_display(),
"created_on": attachment.created_on,
"created_on_arrow": attachment.created_on_arrow,
"created_by": attachment.created_by.email,
"file_type": attachment.file_type()
})
def form_invalid(self, form):
return JsonResponse({"error": form['attachment'].errors})
class DeleteAttachmentsView(LoginRequiredMixin, View):
def post(self, request, *args, **kwargs):
self.object = get_object_or_404(
Attachments, id=request.POST.get("attachment_id"))
if (
request.user == self.object.created_by or
request.user.is_superuser or
request.user.role == 'ADMIN'
):
self.object.delete()
data = {"acd": request.POST.get("attachment_id")}
return JsonResponse(data)
data = {
'error': "You don't have permission to delete this attachment."}
return JsonResponse(data)
|
en
| 0.32116
|
# from accounts.models import Account # elif request.user.google.all(): # users = [] # accounts = Account.objects.filter(created_by=request.user).filter(status="open") # if Task.objects.filter(id=task_id).exists(): # task = Task.objects.select_related('account').prefetch_related( # 'assigned_to', 'contacts').get(id=task_id) # form = TaskForm(request_user=request.user) # task.assigned_to.clear() # task.contacts.clear() # task.assigned_to.add(*request.POST.getlist('assigned_to')) # task.contacts.add(*request.POST.getlist('contacts'))
| 1.917634
| 2
|
tempest/api/identity/admin/v3/test_credentials.py
|
Hybrid-Cloud/hybrid-tempest
| 3
|
6625649
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import test
class CredentialsTestJSON(base.BaseIdentityV3AdminTest):
@classmethod
def resource_setup(cls):
super(CredentialsTestJSON, cls).resource_setup()
cls.projects = list()
cls.creds_list = [['project_id', 'user_id', 'id'],
['access', 'secret']]
u_name = data_utils.rand_name('user')
u_desc = '%s description' % u_name
u_email = <EMAIL>' % u_name
u_password = data_utils.rand_password()
for i in range(2):
cls.project = cls.projects_client.create_project(
data_utils.rand_name('project'),
description=data_utils.rand_name('project-desc'))['project']
cls.projects.append(cls.project['id'])
cls.user_body = cls.users_client.create_user(
u_name, description=u_desc, password=u_password,
email=u_email, project_id=cls.projects[0])['user']
@classmethod
def resource_cleanup(cls):
cls.users_client.delete_user(cls.user_body['id'])
for p in cls.projects:
cls.projects_client.delete_project(p)
super(CredentialsTestJSON, cls).resource_cleanup()
def _delete_credential(self, cred_id):
self.creds_client.delete_credential(cred_id)
@test.attr(type='smoke')
@test.idempotent_id('7cd59bf9-bda4-4c72-9467-d21cab278355')
def test_credentials_create_get_update_delete(self):
blob = '{"access": "%s", "secret": "%s"}' % (
data_utils.rand_name('Access'), data_utils.rand_name('Secret'))
cred = self.creds_client.create_credential(
user_id=self.user_body['id'], project_id=self.projects[0],
blob=blob, type='ec2')['credential']
self.addCleanup(self._delete_credential, cred['id'])
for value1 in self.creds_list[0]:
self.assertIn(value1, cred)
for value2 in self.creds_list[1]:
self.assertIn(value2, cred['blob'])
new_keys = [data_utils.rand_name('NewAccess'),
data_utils.rand_name('NewSecret')]
blob = '{"access": "%s", "secret": "%s"}' % (new_keys[0], new_keys[1])
update_body = self.creds_client.update_credential(
cred['id'], blob=blob, project_id=self.projects[1],
type='ec2')['credential']
self.assertEqual(cred['id'], update_body['id'])
self.assertEqual(self.projects[1], update_body['project_id'])
self.assertEqual(self.user_body['id'], update_body['user_id'])
self.assertEqual(update_body['blob']['access'], new_keys[0])
self.assertEqual(update_body['blob']['secret'], new_keys[1])
get_body = self.creds_client.show_credential(cred['id'])['credential']
for value1 in self.creds_list[0]:
self.assertEqual(update_body[value1],
get_body[value1])
for value2 in self.creds_list[1]:
self.assertEqual(update_body['blob'][value2],
get_body['blob'][value2])
@test.idempotent_id('13202c00-0021-42a1-88d4-81b44d448aab')
def test_credentials_list_delete(self):
created_cred_ids = list()
fetched_cred_ids = list()
for i in range(2):
blob = '{"access": "%s", "secret": "%s"}' % (
data_utils.rand_name('Access'), data_utils.rand_name('Secret'))
cred = self.creds_client.create_credential(
user_id=self.user_body['id'], project_id=self.projects[0],
blob=blob, type='ec2')['credential']
created_cred_ids.append(cred['id'])
self.addCleanup(self._delete_credential, cred['id'])
creds = self.creds_client.list_credentials()['credentials']
for i in creds:
fetched_cred_ids.append(i['id'])
missing_creds = [c for c in created_cred_ids
if c not in fetched_cred_ids]
self.assertEqual(0, len(missing_creds),
"Failed to find cred %s in fetched list" %
', '.join(m_cred for m_cred in missing_creds))
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest.common.utils import data_utils
from tempest import test
class CredentialsTestJSON(base.BaseIdentityV3AdminTest):
@classmethod
def resource_setup(cls):
super(CredentialsTestJSON, cls).resource_setup()
cls.projects = list()
cls.creds_list = [['project_id', 'user_id', 'id'],
['access', 'secret']]
u_name = data_utils.rand_name('user')
u_desc = '%s description' % u_name
u_email = <EMAIL>' % u_name
u_password = data_utils.rand_password()
for i in range(2):
cls.project = cls.projects_client.create_project(
data_utils.rand_name('project'),
description=data_utils.rand_name('project-desc'))['project']
cls.projects.append(cls.project['id'])
cls.user_body = cls.users_client.create_user(
u_name, description=u_desc, password=u_password,
email=u_email, project_id=cls.projects[0])['user']
@classmethod
def resource_cleanup(cls):
cls.users_client.delete_user(cls.user_body['id'])
for p in cls.projects:
cls.projects_client.delete_project(p)
super(CredentialsTestJSON, cls).resource_cleanup()
def _delete_credential(self, cred_id):
self.creds_client.delete_credential(cred_id)
@test.attr(type='smoke')
@test.idempotent_id('7cd59bf9-bda4-4c72-9467-d21cab278355')
def test_credentials_create_get_update_delete(self):
blob = '{"access": "%s", "secret": "%s"}' % (
data_utils.rand_name('Access'), data_utils.rand_name('Secret'))
cred = self.creds_client.create_credential(
user_id=self.user_body['id'], project_id=self.projects[0],
blob=blob, type='ec2')['credential']
self.addCleanup(self._delete_credential, cred['id'])
for value1 in self.creds_list[0]:
self.assertIn(value1, cred)
for value2 in self.creds_list[1]:
self.assertIn(value2, cred['blob'])
new_keys = [data_utils.rand_name('NewAccess'),
data_utils.rand_name('NewSecret')]
blob = '{"access": "%s", "secret": "%s"}' % (new_keys[0], new_keys[1])
update_body = self.creds_client.update_credential(
cred['id'], blob=blob, project_id=self.projects[1],
type='ec2')['credential']
self.assertEqual(cred['id'], update_body['id'])
self.assertEqual(self.projects[1], update_body['project_id'])
self.assertEqual(self.user_body['id'], update_body['user_id'])
self.assertEqual(update_body['blob']['access'], new_keys[0])
self.assertEqual(update_body['blob']['secret'], new_keys[1])
get_body = self.creds_client.show_credential(cred['id'])['credential']
for value1 in self.creds_list[0]:
self.assertEqual(update_body[value1],
get_body[value1])
for value2 in self.creds_list[1]:
self.assertEqual(update_body['blob'][value2],
get_body['blob'][value2])
@test.idempotent_id('13202c00-0021-42a1-88d4-81b44d448aab')
def test_credentials_list_delete(self):
created_cred_ids = list()
fetched_cred_ids = list()
for i in range(2):
blob = '{"access": "%s", "secret": "%s"}' % (
data_utils.rand_name('Access'), data_utils.rand_name('Secret'))
cred = self.creds_client.create_credential(
user_id=self.user_body['id'], project_id=self.projects[0],
blob=blob, type='ec2')['credential']
created_cred_ids.append(cred['id'])
self.addCleanup(self._delete_credential, cred['id'])
creds = self.creds_client.list_credentials()['credentials']
for i in creds:
fetched_cred_ids.append(i['id'])
missing_creds = [c for c in created_cred_ids
if c not in fetched_cred_ids]
self.assertEqual(0, len(missing_creds),
"Failed to find cred %s in fetched list" %
', '.join(m_cred for m_cred in missing_creds))
|
en
| 0.845807
|
# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
| 1.894242
| 2
|
src/forms.py
|
Amazeryogo/CrispyDB
| 1
|
6625650
|
from flask_wtf import FlaskForm
import wtforms
class LoginForm(FlaskForm):
username = wtforms.StringField('Username', validators=[wtforms.validators.DataRequired()])
password = wtforms.PasswordField('Password', validators=[wtforms.validators.DataRequired()])
submit = wtforms.SubmitField('Login')
class NewCollectionForm(FlaskForm):
name = wtforms.StringField('Make A New Collection', validators=[wtforms.validators.DataRequired()])
submit = wtforms.SubmitField('Create')
class Changeauth(FlaskForm):
old_password = wtforms.PasswordField('<PASSWORD> Password', validators=[wtforms.validators.DataRequired()])
new_password = wtforms.PasswordField('<PASSWORD>', validators=[wtforms.validators.DataRequired()])
submit = wtforms.SubmitField('Change')
|
from flask_wtf import FlaskForm
import wtforms
class LoginForm(FlaskForm):
username = wtforms.StringField('Username', validators=[wtforms.validators.DataRequired()])
password = wtforms.PasswordField('Password', validators=[wtforms.validators.DataRequired()])
submit = wtforms.SubmitField('Login')
class NewCollectionForm(FlaskForm):
name = wtforms.StringField('Make A New Collection', validators=[wtforms.validators.DataRequired()])
submit = wtforms.SubmitField('Create')
class Changeauth(FlaskForm):
old_password = wtforms.PasswordField('<PASSWORD> Password', validators=[wtforms.validators.DataRequired()])
new_password = wtforms.PasswordField('<PASSWORD>', validators=[wtforms.validators.DataRequired()])
submit = wtforms.SubmitField('Change')
|
none
| 1
| 2.721728
| 3
|